Overview

This project aims to reproduce the quantitative results obtained in (Chen et al. 2024). The paper is published in Humanities and Social Sciences Communications (a Q1 journal in the Nature Portfolio). The paper is rich in terms of the quantitative methods employed, which makes it very instructive. Its substance is most relevant to the management and policy research fields.

Theoretical Contributions

The theoretical contribution of this paper is as follows:

Hypotheses

One hypothesis is presented in this paper which is another formulation of the research question mentioned in the title:

Methods:

Quantitative Replication

#loading libraries and data

library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(haven)
library(correlation)

# Build the path to the replication dataset. file.path() is meant to join
# path components (it is a no-op on a single pre-joined string), so pass the
# components separately; the "/" separator it produces works on Windows too.
# NOTE(review): hard-coded user-specific path -- adjust for your machine.
path1 <- file.path("C:", "Users", "hassa", "OneDrive", "Documents",
                   "R", "Datasets", "data1.dta")
data1 <- read_dta(path1)  # Stata .dta file, read via haven
head(data1)  # We take a look at the data
## # A tibble: 6 × 68
##    code  year prov  city  ind   Treat  Post Treat_post  time Invention
##   <dbl> <dbl> <chr> <chr> <chr> <dbl> <dbl>      <dbl> <dbl>     <dbl>
## 1    17  2012 广东  深圳  C37       0     0          0  2017         0
## 2    17  2013 广东  深圳  C37       0     0          0  2017         0
## 3    17  2014 广东  深圳  C37       0     0          0  2017         0
## 4    17  2015 广东  深圳  C37       0     0          0  2017         0
## 5    17  2016 广东  深圳  C37       0     0          0  2017         0
## 6    17  2017 广东  深圳  C37       0     1          0  2017         0
## # ℹ 58 more variables: Non_Invention <dbl>, Size <dbl>, Age <dbl>, ROA <dbl>,
## #   Lev <dbl>, RD <dbl>, Pee <dbl>, Inst <dbl>, HHI <dbl>, KZ <dbl>, CFC <dbl>,
## #   Capex <dbl>, NWC <dbl>, SOE <dbl>, Tax <dbl>, Subsidy <dbl>,
## #   Financing <dbl>, Collaboration <dbl>, Talent <dbl>, TQ <dbl>, TFP_LP <dbl>,
## #   Treat_Before4 <dbl>, Treat_Before3 <dbl>, Treat_Before2 <dbl>,
## #   Treat_Current <dbl>, Treat_After1 <dbl>, Treat_After2 <dbl>,
## #   Treat_After3 <dbl>, `_est_in4` <dbl>, High_Tech <dbl>, IMPIND <dbl>, …

Table 1: Descriptive Statistics

# Displaying Descriptive Statistics (Table 1, pooled sample)
# Get the skim summary for the 16 variables reported in the paper's Table 1
library(skimr)
skim_summary <- skim(data1[c("Invention", "Non_Invention", "Treat_post", "Size", "Age", "ROA", "Lev", "RD", "Pee", "Inst", "HHI", "KZ", "CFC", "Capex", "NWC", "SOE")])

# Format every numeric column of a skim summary to three decimal places.
# The numeric columns become "%.3f" character strings, so the result is
# intended for display only.
format_skim <- function(skim_summary) {
  to_three_dp <- function(v) sprintf("%.3f", v)
  skim_summary %>%
    mutate(across(where(is.numeric), to_three_dp))
}

# Apply the formatting (numeric summary columns become "%.3f" strings,
# so the result is display-only -- don't compute on it downstream)
formatted_skim <- format_skim(skim_summary)

# Print the formatted summary
print(formatted_skim)
## ── Data Summary ────────────────────────
##                            Values
## Name                       ...[] 
## Number of rows             4422  
## Number of columns          16    
## _______________________          
## Column type frequency:           
##   numeric                  16    
## ________________________         
## Group variables            None  
## 
## ── Variable type: numeric ──────────────────────────────────────────────────────
##    skim_variable n_missing complete_rate mean   sd    p0     p25    p50   
##  1 Invention     0.000     1.000         1.745  1.673 0.000  0.000  1.386 
##  2 Non_Invention 0.000     1.000         1.628  1.547 0.000  0.000  1.609 
##  3 Treat_post    0.000     1.000         0.302  0.459 0.000  0.000  0.000 
##  4 Size          0.000     1.000         22.196 1.194 19.796 21.380 22.082
##  5 Age           0.000     1.000         2.318  0.638 0.693  1.946  2.398 
##  6 ROA           0.000     1.000         0.030  0.067 -0.310 0.011  0.033 
##  7 Lev           0.000     1.000         0.408  0.191 0.056  0.258  0.401 
##  8 RD            0.000     1.000         0.056  0.044 0.000  0.033  0.044 
##  9 Pee           0.000     1.000         0.182  0.101 0.019  0.105  0.167 
## 10 Inst          0.000     1.000         0.399  0.240 0.002  0.190  0.412 
## 11 HHI           0.000     1.000         0.073  0.054 0.019  0.034  0.053 
## 12 KZ            0.000     1.000         1.332  2.000 -4.731 0.183  1.508 
## 13 CFC           0.000     1.000         -0.002 0.097 -0.408 -0.025 0.013 
## 14 Capex         0.000     1.000         0.042  0.037 0.001  0.017  0.032 
## 15 NWC           0.000     1.000         0.272  0.210 -0.226 0.119  0.261 
## 16 SOE           0.000     1.000         0.299  0.458 0.000  0.000  0.000 
##    p75    p100   hist 
##  1 2.708  6.394  ▇▅▃▁▁
##  2 2.773  6.293  ▇▅▃▁▁
##  3 1.000  1.000  ▇▁▁▁▃
##  4 22.866 25.738 ▃▇▆▂▁
##  5 2.833  3.332  ▂▂▆▇▆
##  6 0.061  0.185  ▁▁▁▇▂
##  7 0.549  0.896  ▆▇▇▅▂
##  8 0.066  0.265  ▇▃▁▁▁
##  9 0.240  0.490  ▆▇▅▂▁
## 10 0.583  0.905  ▇▆▇▆▃
## 11 0.088  0.232  ▇▃▁▁▁
## 12 2.599  6.347  ▁▃▇▇▁
## 13 0.049  0.206  ▁▁▂▇▂
## 14 0.056  0.188  ▇▃▁▁▁
## 15 0.416  0.762  ▂▆▇▆▂
## 16 1.000  1.000  ▇▁▁▁▃
# skimr and format_skim() are already set up in the Table 1 chunk; the guard
# below only makes this chunk runnable on a fresh session. The previous
# duplicate definition of format_skim() has been removed to keep a single
# source of truth for the formatting rule.
if (!requireNamespace("skimr", quietly = TRUE)) install.packages("skimr")
library(skimr)

# Variables reported in the descriptive-statistics tables.
desc_vars <- c("Invention", "Non_Invention", "Treat_post", "Size", "Age",
               "ROA", "Lev", "RD", "Pee", "Inst", "HHI", "KZ", "CFC",
               "Capex", "NWC", "SOE")

# Skim summary restricted to the treated firms (Treat == 1)
skim_treat_1 <- skim(data1[data1$Treat == 1, desc_vars])
formatted_skim_treat_1 <- format_skim(skim_treat_1)
print("Descriptive Statistics for Treat == 1:")
## [1] "Descriptive Statistics for Treat == 1:"
print(formatted_skim_treat_1)
## ── Data Summary ────────────────────────
##                            Values
## Name                       ...[] 
## Number of rows             2211  
## Number of columns          16    
## _______________________          
## Column type frequency:           
##   numeric                  16    
## ________________________         
## Group variables            None  
## 
## ── Variable type: numeric ──────────────────────────────────────────────────────
##    skim_variable n_missing complete_rate mean   sd    p0     p25    p50   
##  1 Invention     0.000     1.000         1.894  1.803 0.000  0.000  1.609 
##  2 Non_Invention 0.000     1.000         1.562  1.475 0.000  0.000  1.609 
##  3 Treat_post    0.000     1.000         0.605  0.489 0.000  0.000  1.000 
##  4 Size          0.000     1.000         22.206 1.201 19.796 21.346 22.122
##  5 Age           0.000     1.000         2.328  0.640 0.693  1.946  2.398 
##  6 ROA           0.000     1.000         0.029  0.067 -0.310 0.011  0.033 
##  7 Lev           0.000     1.000         0.418  0.196 0.056  0.259  0.414 
##  8 RD            0.000     1.000         0.055  0.041 0.000  0.033  0.044 
##  9 Pee           0.000     1.000         0.185  0.097 0.019  0.112  0.172 
## 10 Inst          0.000     1.000         0.404  0.240 0.002  0.206  0.414 
## 11 HHI           0.000     1.000         0.076  0.053 0.019  0.042  0.061 
## 12 KZ            0.000     1.000         1.387  2.006 -4.731 0.182  1.553 
## 13 CFC           0.000     1.000         -0.001 0.095 -0.408 -0.025 0.013 
## 14 Capex         0.000     1.000         0.042  0.036 0.001  0.017  0.032 
## 15 NWC           0.000     1.000         0.265  0.210 -0.226 0.111  0.261 
## 16 SOE           0.000     1.000         0.290  0.454 0.000  0.000  0.000 
##    p75    p100   hist 
##  1 2.944  6.394  ▇▅▃▁▂
##  2 2.773  6.293  ▇▅▃▁▁
##  3 1.000  1.000  ▅▁▁▁▇
##  4 22.882 25.738 ▃▇▇▂▁
##  5 2.833  3.332  ▂▂▆▇▆
##  6 0.060  0.185  ▁▁▁▇▁
##  7 0.568  0.896  ▆▇▇▆▂
##  8 0.065  0.265  ▇▃▁▁▁
##  9 0.239  0.490  ▅▇▅▂▁
## 10 0.590  0.905  ▇▆▇▆▃
## 11 0.092  0.232  ▇▅▁▁▁
## 12 2.703  6.347  ▁▃▇▇▁
## 13 0.048  0.206  ▁▁▂▇▂
## 14 0.057  0.188  ▇▃▁▁▁
## 15 0.410  0.762  ▂▇▇▆▂
## 16 1.000  1.000  ▇▁▁▁▃
# Skim summary for the control firms (Treat == 0); same 16 variables as the
# pooled Table 1 summary, formatted to three decimals for display
skim_treat_0 <- skim(data1[data1$Treat == 0, c("Invention", "Non_Invention", "Treat_post", "Size", "Age", "ROA", "Lev", "RD", "Pee", "Inst", "HHI", "KZ", "CFC", "Capex", "NWC", "SOE")])
formatted_skim_treat_0 <- format_skim(skim_treat_0)
print("Descriptive Statistics for Treat == 0:")
## [1] "Descriptive Statistics for Treat == 0:"
print(formatted_skim_treat_0)
## ── Data Summary ────────────────────────
##                            Values
## Name                       ...[] 
## Number of rows             2211  
## Number of columns          16    
## _______________________          
## Column type frequency:           
##   numeric                  16    
## ________________________         
## Group variables            None  
## 
## ── Variable type: numeric ──────────────────────────────────────────────────────
##    skim_variable n_missing complete_rate mean   sd    p0     p25    p50   
##  1 Invention     0.000     1.000         1.596  1.517 0.000  0.000  1.386 
##  2 Non_Invention 0.000     1.000         1.694  1.614 0.000  0.000  1.609 
##  3 Treat_post    0.000     1.000         0.000  0.000 0.000  0.000  0.000 
##  4 Size          0.000     1.000         22.185 1.186 19.796 21.419 22.054
##  5 Age           0.000     1.000         2.307  0.636 0.693  1.946  2.398 
##  6 ROA           0.000     1.000         0.031  0.068 -0.310 0.011  0.034 
##  7 Lev           0.000     1.000         0.398  0.186 0.056  0.257  0.391 
##  8 RD            0.000     1.000         0.057  0.046 0.000  0.032  0.044 
##  9 Pee           0.000     1.000         0.180  0.105 0.019  0.098  0.164 
## 10 Inst          0.000     1.000         0.393  0.240 0.002  0.179  0.407 
## 11 HHI           0.000     1.000         0.070  0.055 0.019  0.031  0.049 
## 12 KZ            0.000     1.000         1.277  1.993 -4.731 0.188  1.469 
## 13 CFC           0.000     1.000         -0.003 0.100 -0.408 -0.025 0.014 
## 14 Capex         0.000     1.000         0.043  0.037 0.001  0.017  0.032 
## 15 NWC           0.000     1.000         0.279  0.210 -0.226 0.127  0.260 
## 16 SOE           0.000     1.000         0.307  0.461 0.000  0.000  0.000 
##    p75    p100   hist 
##  1 2.565  6.394  ▇▅▃▁▁
##  2 2.803  6.293  ▇▅▃▁▁
##  3 0.000  0.000  ▁▁▇▁▁
##  4 22.844 25.738 ▃▇▆▂▁
##  5 2.833  3.332  ▂▂▆▇▆
##  6 0.063  0.185  ▁▁▁▇▂
##  7 0.528  0.896  ▅▇▇▅▁
##  8 0.067  0.265  ▇▃▁▁▁
##  9 0.241  0.490  ▇▇▅▂▁
## 10 0.576  0.905  ▇▆▇▆▂
## 11 0.083  0.232  ▇▃▁▁▁
## 12 2.513  6.347  ▁▃▇▆▁
## 13 0.049  0.206  ▁▁▂▇▂
## 14 0.056  0.188  ▇▃▁▁▁
## 15 0.425  0.762  ▂▆▇▆▂
## 16 1.000  1.000  ▇▁▁▁▃

Table 2: Correlation Matrix

# Load necessary libraries
library(modelsummary)
## `modelsummary` 2.0.0 now uses `tinytable` as its default table-drawing
##   backend. Learn more at: https://vincentarelbundock.github.io/tinytable/
## 
## Revert to `kableExtra` for one session:
## 
##   options(modelsummary_factory_default = 'kableExtra')
##   options(modelsummary_factory_latex = 'kableExtra')
##   options(modelsummary_factory_html = 'kableExtra')
## 
## Silence this message forever:
## 
##   config_modelsummary(startup_message = FALSE)
if (requireNamespace("correlation", quietly = TRUE)) {
  # Pairwise correlations for all Table 2 variables.
  cor.df <- data1 %>%
    select(Invention, Non_Invention, Treat_post, Size, Age, ROA, Lev, RD,
           Pee, Inst, HHI, KZ, CFC, Capex, NWC, SOE)

  co <- correlation::correlation(cor.df)

  # Render the correlation matrix with significance stars (easycorrelation
  # objects carry p-values, so `stars = TRUE` works). The earlier un-starred
  # call was removed: inside a braced block only the last expression is
  # auto-printed, so its result was silently discarded.
  modelsummary::datasummary_correlation(co, stars = TRUE)
}
Invention Non_Invention Treat_post Size Age ROA Lev RD Pee Inst HHI KZ CFC Capex NWC SOE
Invention 1 . . . . . . . . . . . . . . .
Non_Invention .52*** 1 . . . . . . . . . . . . . .
Treat_post .12*** -.11*** 1 . . . . . . . . . . . . .
Size .34*** .18*** .13*** 1 . . . . . . . . . . . .
Age .03 -.12*** .26*** .41*** 1 . . . . . . . . . . .
ROA .09*** .07*** -.10*** .07*** -.08*** 1 . . . . . . . . . .
Lev .13*** .09*** .10*** .46*** .35*** -.31*** 1 . . . . . . . . .
RD .13*** .04 .03 -.10*** -.12*** -.11*** -.22*** 1 . . . . . . . .
Pee -.07*** -.01 -.03 -.04 -.04 -.09*** .06** -.11*** 1 . . . . . . .
Inst .14*** .05* -.03 .40*** .21*** .18*** .17*** -.12*** .01 1 . . . . . .
HHI .03 .12*** -.05+ .06** -.03 -.03 .17*** -.17*** .09*** .07*** 1 . . . . .
KZ -.04 -.06** .11*** .06** .25*** -.49*** .65*** -.05+ .06*** -.02 .07*** 1 . . . .
CFC .06** .04 .00 .07*** .05* .27*** .02 -.04 .03 -.01 .01 -.02 1 . . .
Capex .02 .04 -.11*** .01 -.24*** .12*** -.04 .06** .35*** .05* .04 -.07*** -.10*** 1 . .
NWC -.02 -.01 -.09*** -.35*** -.33*** .33*** -.77*** .21*** -.37*** -.10*** -.16*** -.57*** -.12*** -.13*** 1 .
SOE .15*** .08*** -.01 .29*** .40*** .01 .22*** -.07*** -.04 .41*** .10*** .12*** .04 -.15*** -.15*** 1

Table 4: Results of MIC 2025 pilot cities and firm innovation

# Install and load necessary packages
# Install and load necessary packages
if (!requireNamespace("modelsummary", quietly = TRUE)) install.packages("modelsummary")
if (!requireNamespace("plm", quietly = TRUE)) install.packages("plm")
if (!requireNamespace("lmtest", quietly = TRUE)) install.packages("lmtest")
if (!requireNamespace("sandwich", quietly = TRUE)) install.packages("sandwich")

library(modelsummary)
library(plm)
## 
## Attaching package: 'plm'
## The following objects are masked from 'package:dplyr':
## 
##     between, lag, lead
library(lmtest)
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
library(sandwich)

# Fit the baseline DID models: firm fixed effects via plm's "within"
# estimator, year fixed effects via factor(year) dummies. Indexing by both
# firm ('code') and time ('year') avoids plm's "column 'time' overwritten"
# warning and matches every later model in this document.
model1 <- plm(Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE + factor(year), 
              data = data1, 
              index = c("code", "year"), 
              model = "within")
model2 <- plm(Non_Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE + factor(year), 
              data = data1, 
              index = c("code", "year"), 
              model = "within")

# Firm-clustered (HC1) standard errors
vcov_model1 <- vcovHC(model1, type = "HC1", cluster = "group")
vcov_model2 <- vcovHC(model2, type = "HC1", cluster = "group")

# Combine models into a list
model_list <- list(
  "Model 1" = model1,
  "Model 2" = model2
)

# Create a summary table using modelsummary. Note: `fmt` (not `digits`)
# controls the decimal places and `notes` (not `custom.note`) adds the
# table note -- the previous arguments were silently ignored. The note text
# now matches the default star thresholds actually used by `stars = TRUE`.
modelsummary(
  model_list,
  vcov = list(vcov_model1, vcov_model2),  # Provide clustered standard errors
  stars = TRUE,   # Add significance stars
  fmt = 3,        # Number of decimal places
  output = "gt",  # Render with the gt backend
  title = "Regression Results",
  notes = "Standard errors are in parentheses. + p<0.1, * p<0.05, ** p<0.01, *** p<0.001"
)
Regression Results
Model 1 Model 2
Treat_post 0.395*** -0.325**
(0.087) (0.108)
Size 0.135+ 0.133
(0.071) (0.086)
Age -0.099 0.278
(0.145) (0.186)
ROA 0.532 0.466
(0.391) (0.319)
Lev 0.419 0.398
(0.361) (0.391)
RD 2.346* 1.449+
(0.934) (0.829)
Pee 0.086 0.882+
(0.398) (0.460)
Inst -0.082 -0.048
(0.239) (0.226)
HHI -1.646 -2.098
(1.236) (1.538)
KZ -0.014 -0.035*
(0.019) (0.017)
CFC 0.509* 0.331+
(0.201) (0.177)
Capex 2.150** 1.360*
(0.711) (0.674)
NWC 0.453 0.485
(0.316) (0.342)
SOE -0.169 -0.046
(0.155) (0.153)
factor(year)2013 0.074 -0.126+
(0.055) (0.066)
factor(year)2014 0.108 -0.158
(0.079) (0.097)
factor(year)2015 0.145 -0.155
(0.100) (0.116)
factor(year)2016 0.104 -0.213
(0.114) (0.148)
factor(year)2017 0.089 -0.349*
(0.142) (0.162)
factor(year)2018 0.206 -0.434*
(0.148) (0.181)
factor(year)2019 0.203 -0.424*
(0.159) (0.196)
factor(year)2020 0.219 -0.396+
(0.164) (0.210)
factor(year)2021 0.046 -0.433+
(0.172) (0.228)
factor(year)2022 -0.261 -0.834***
(0.179) (0.240)
Num.Obs. 4422 4422
R2 0.043 0.052
R2 Adj. -0.059 -0.049
AIC 12932.2 11722.2
BIC 13092.1 11882.1
RMSE 1.04 0.91
Std.Errors Custom Custom
+ p < 0.1, * p < 0.05, ** p < 0.01, *** p < 0.001

Table 5: Results of the parallel trend tests

# Install and load modelsummary if not already installed
if (!requireNamespace("modelsummary", quietly = TRUE)) install.packages("modelsummary")
library(modelsummary)

# Parallel-trends (event-study) specification: leads (Treat_Before*) and
# lags (Treat_Current / Treat_After*) of the pilot treatment, plus the
# controls, firm fixed effects ("within") and year dummies.
# 'code' is the firm identifier used for clustering.

# Model for Invention
model_fe_invention <- plm(
  Invention ~ Treat_Before4 + Treat_Before3 + Treat_Before2 + Treat_Current + Treat_After1 + 
    Treat_After2 + Treat_After3 + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + 
    Capex + NWC + SOE + factor(year),
  data = data1,
  index = c("code", "year"),
  model = "within"
)

# Firm-clustered (HC1) standard errors for the Invention model
cluster_vcov_invention <- vcovHC(model_fe_invention, type = "HC1", cluster = "group")

# Model for Non_Invention
model_fe_non_invention <- plm(
  Non_Invention ~ Treat_Before4 + Treat_Before3 + Treat_Before2 + Treat_Current + Treat_After1 + 
    Treat_After2 + Treat_After3 + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + 
    Capex + NWC + SOE + factor(year),
  data = data1,
  index = c("code", "year"),
  model = "within"
)

# Firm-clustered (HC1) standard errors for the Non_Invention model
cluster_vcov_non_invention <- vcovHC(model_fe_non_invention, type = "HC1", cluster = "group")

# Create a list of models
model_list <- list(
  "Model 1: Invention" = model_fe_invention,
  "Model 2: Non_Invention" = model_fe_non_invention
)

# Summary table with clustered SEs. `fmt` (not `digits`) sets the decimals
# and `notes` (not `custom.note`) is modelsummary's note argument -- the
# previous arguments were silently ignored. The note text now matches the
# default star thresholds actually used by `stars = TRUE`.
modelsummary(
  model_list,
  vcov = list(cluster_vcov_invention, cluster_vcov_non_invention),  # Clustered SEs
  stars = TRUE,   # Add significance stars
  fmt = 3,        # Number of decimal places
  output = "gt",  # Render with the gt backend
  title = "Regression Results",
  notes = "Standard errors are in parentheses. + p<0.1, * p<0.05, ** p<0.01, *** p<0.001"
)
Regression Results
Model 1: Invention Model 2: Non_Invention
Treat_Before4 -0.106 -0.010
(0.100) (0.123)
Treat_Before3 0.014 -0.054
(0.082) (0.119)
Treat_Before2 -0.045 -0.083
(0.063) (0.091)
Treat_Current 0.263* -0.327**
(0.113) (0.126)
Treat_After1 0.419** -0.411**
(0.139) (0.125)
Treat_After2 0.491** -0.421**
(0.150) (0.136)
Treat_After3 0.348** -0.338*
(0.119) (0.134)
Size 0.135+ 0.134
(0.071) (0.086)
Age -0.098 0.278
(0.145) (0.186)
ROA 0.553 0.454
(0.392) (0.318)
Lev 0.435 0.396
(0.361) (0.392)
RD 2.350* 1.453+
(0.935) (0.829)
Pee 0.104 0.878+
(0.400) (0.463)
Inst -0.086 -0.043
(0.239) (0.226)
HHI -1.611 -2.093
(1.240) (1.538)
KZ -0.014 -0.035*
(0.019) (0.017)
CFC 0.504* 0.334+
(0.201) (0.177)
Capex 2.115** 1.363*
(0.713) (0.675)
NWC 0.457 0.486
(0.315) (0.343)
SOE -0.170 -0.045
(0.156) (0.153)
factor(year)2013 0.034 -0.112
(0.063) (0.074)
factor(year)2014 0.066 -0.127
(0.086) (0.105)
factor(year)2015 0.099 -0.146
(0.102) (0.121)
factor(year)2016 0.093 -0.218
(0.118) (0.155)
factor(year)2017 0.051 -0.326+
(0.144) (0.168)
factor(year)2018 0.117 -0.393*
(0.147) (0.186)
factor(year)2019 0.148 -0.408*
(0.160) (0.200)
factor(year)2020 0.188 -0.395+
(0.167) (0.216)
factor(year)2021 0.016 -0.432+
(0.174) (0.233)
factor(year)2022 -0.292 -0.833***
(0.182) (0.246)
Num.Obs. 4422 4422
R2 0.044 0.053
R2 Adj. -0.060 -0.050
AIC 12938.6 11731.8
BIC 13136.8 11930.1
RMSE 1.04 0.91
Std.Errors Custom Custom
+ p < 0.1, * p < 0.05, ** p < 0.01, *** p < 0.001

Table 6: Endogeneity test: city specific factors Logit model

# Table 6 uses a separate city-level dataset (data2).
# NOTE(review): hard-coded user-specific path -- adjust for your machine.
path2<-file.path("C:\\Users\\hassa\\OneDrive\\Documents\\R\\Datasets\\data2.dta")
data2<-read_dta(path2)

# Fit the logistic regression model using glm() with family = binomial for logit model.
# Outcome: whether a city is a pilot city (Treat); predictors are observable
# city characteristics, testing whether pilot selection is driven by them.
logit_model <- glm(
  Treat ~ GDP_per_capita + GDP_growth_rate + Science_and_Technology + LnPopulation + 
    Research_Employment + Infra_Spending + Apply_Patent + Obtain_Patent,
  data = data2,
  family = binomial(link = "logit")
)
# Default modelsummary table (coefficients with SEs in parentheses)
modelsummary(logit_model)
(1)
(Intercept) -5.321
(4.965)
GDP_per_capita -0.122
(0.409)
GDP_growth_rate 11.578
(11.212)
Science_and_Technology 45.341
(105.016)
LnPopulation 0.284
(0.400)
Research_Employment 16.197
(60.255)
Infra_Spending -1.642
(1.064)
Apply_Patent -0.381
(0.674)
Obtain_Patent 0.740
(0.686)
Num.Obs. 290
AIC 182.0
BIC 215.0
Log.Lik. -81.979
F 2.998
RMSE 0.29

Table 7: Endogeneity test political connection

library(dplyr)
library(tidyr)
library(kableExtra)
## 
## Attaching package: 'kableExtra'
## The following object is masked from 'package:dplyr':
## 
##     group_rows
# Map a single p-value to a significance label:
#   p < 0.001 -> "***", p < 0.01 -> "**", p < 0.05 -> "*", otherwise "".
# Scalar-only by design (used on individual chi-squared p-values below).
significance_levels <- function(p_value) {
  if (p_value < 0.001) {
    "***"
  } else if (p_value < 0.01) {
    "**"
  } else if (p_value < 0.05) {
    "*"
  } else {
    ""
  }
}

# Build a contingency table of `var` levels (rows) against Treat (0/1
# columns), zero-filling empty cells, returned as a plain data.frame.
# `var` is a column name given as a string; `.data[[var]]` is the
# documented tidy-eval idiom for that (the previous
# `select()` + `count(across(c(!!sym(var), Treat)))` produced the same
# result but via superseded constructs).
create_contingency_table <- function(data, var) {
  data %>%
    count(.data[[var]], Treat) %>%
    pivot_wider(names_from = Treat, values_from = n, values_fill = 0) %>%
    as.data.frame()
}

# Chi-squared test of independence on a contingency table produced by
# create_contingency_table(); returns the p-value only.
perform_chisq_test <- function(contingency_table) {
  # First column holds the grouping variable's levels -- drop it so only
  # the count columns enter the test.
  counts <- contingency_table[, -1]
  chisq.test(counts)$p.value
}

# Tabulate Treat (0/1) counts and within-level treatment proportions for one
# political-connection indicator. Replaces four copy-pasted pipelines that
# differed only in the grouping column.
summarise_connection <- function(data, var) {
  data %>%
    group_by(.data[[var]], Treat) %>%
    summarise(count = n(), .groups = 'drop') %>%
    pivot_wider(names_from = Treat, values_from = count, values_fill = 0) %>%
    mutate(total = `0` + `1`,
           proportion_with = `1` / total,      # share of treated firms
           proportion_without = `0` / total)   # share of control firms
}

summary_work      <- summarise_connection(data1, "work")
summary_workboth  <- summarise_connection(data1, "workboth")
summary_birth     <- summarise_connection(data1, "birth")
summary_birthboth <- summarise_connection(data1, "birthboth")

# Pull the treated-firm share (proportion_with) at a given level (0/1) of a
# connection indicator from its summary table. Replaces eight copy-pasted
# filter/pull blocks. Note the "_without" objects still hold
# `proportion_with` evaluated at level 0, exactly as in the original code.
pull_proportion_with <- function(summary_tbl, var, level) {
  summary_tbl %>%
    filter(.data[[var]] == level) %>%
    pull(proportion_with)
}

proportions_work_with         <- pull_proportion_with(summary_work, "work", 1)
proportions_work_without      <- pull_proportion_with(summary_work, "work", 0)
proportions_workboth_with     <- pull_proportion_with(summary_workboth, "workboth", 1)
proportions_workboth_without  <- pull_proportion_with(summary_workboth, "workboth", 0)
proportions_birth_with        <- pull_proportion_with(summary_birth, "birth", 1)
proportions_birth_without     <- pull_proportion_with(summary_birth, "birth", 0)
proportions_birthboth_with    <- pull_proportion_with(summary_birthboth, "birthboth", 1)
proportions_birthboth_without <- pull_proportion_with(summary_birthboth, "birthboth", 0)

# Combine proportions for all variables into the Table 7 layout:
# one row per connection indicator, with treated/control shares and their
# difference (control minus treated).
combined_proportions <- tibble(
  variable = c("Work", "Workboth", "Birth", "Birthboth"),
  proportion_with = c(proportions_work_with, proportions_workboth_with, proportions_birth_with, proportions_birthboth_with),
  proportion_without = c(proportions_work_without, proportions_workboth_without, proportions_birth_without, proportions_birthboth_without),
  difference = c(proportions_work_without - proportions_work_with,
                 proportions_workboth_without - proportions_workboth_with,
                 proportions_birth_without - proportions_birth_with,
                 proportions_birthboth_without - proportions_birthboth_with)
)

# Create and test contingency tables (indicator level x Treat counts)
contingency_work <- create_contingency_table(data1, "work")
contingency_workboth <- create_contingency_table(data1, "workboth")
contingency_birth <- create_contingency_table(data1, "birth")
contingency_birthboth <- create_contingency_table(data1, "birthboth")

# Chi-squared p-values for independence of each indicator and Treat
p_value_work <- perform_chisq_test(contingency_work)
p_value_workboth <- perform_chisq_test(contingency_workboth)
p_value_birth <- perform_chisq_test(contingency_birth)
p_value_birthboth <- perform_chisq_test(contingency_birthboth)

# Add significance asterisks based on chi-squared test p-values
# (*** p<0.001, ** p<0.01, * p<0.05, per significance_levels())
combined_proportions <- combined_proportions %>%
  mutate(
    significance = c(significance_levels(p_value_work),
                     significance_levels(p_value_workboth),
                     significance_levels(p_value_birth),
                     significance_levels(p_value_birthboth))
  )

# Print table with 3 decimals
combined_proportions %>%
  kable(digits = 3, format = "pipe") %>%
  kable_styling()
variable proportion_with proportion_without difference significance
Work 0.343 0.589 0.246 ***
Workboth 0.321 0.615 0.294 ***
Birth 0.339 0.594 0.255 ***
Birthboth 0.321 0.615 0.294 ***

Table 8: Robustness test excluding other industrial policies

# Install and load modelsummary if not already installed
if (!requireNamespace("modelsummary", quietly = TRUE)) install.packages("modelsummary")
library(modelsummary)
library(plm)
library(sandwich)

# Robustness check: baseline DID specification augmented with controls for
# two overlapping industrial policies (IMPIND, High_Tech), firm FE
# ("within") and year dummies.

# Model for Invention
model_fe_invention <- plm(
  Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE + IMPIND + High_Tech + factor(year),
  data = data1,
  index = c("code", "year"),
  model = "within"
)

# Firm-clustered (HC1) standard errors for the Invention model
cluster_vcov_invention <- vcovHC(model_fe_invention, type = "HC1", cluster = "group")

# Model for Non_Invention
model_fe_non_invention <- plm(
  Non_Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE + IMPIND + High_Tech + factor(year),
  data = data1,
  index = c("code", "year"),
  model = "within"
)

# Clustered standard errors for Non_Invention model
cluster_vcov_non_invention <- vcovHC(model_fe_non_invention, type = "HC1", cluster = "group")

# Create a list of models
model_list <- list(
  "Invention" = model_fe_invention,
  "Non-Invention" = model_fe_non_invention
)

# Summary table with clustered SEs. `fmt` (not `digits`) sets the decimals
# and `notes` (not `custom.note`) is modelsummary's note argument -- the
# previous arguments were silently ignored. The note text now matches the
# default star thresholds actually used by `stars = TRUE`.
modelsummary(
  model_list,
  vcov = list(cluster_vcov_invention, cluster_vcov_non_invention),  # Clustered SEs
  stars = TRUE,   # Add significance stars
  fmt = 3,        # Number of decimal places
  output = "gt",  # Render with the gt backend
  title = "Robustness Test Excluding Other Industrial Policies",
  notes = "Standard errors are in parentheses. + p<0.1, * p<0.05, ** p<0.01, *** p<0.001"
)
Robustness Test Excluding Other Industrial Policies
Invention Non-Invention
Treat_post 0.411*** -0.325**
(0.088) (0.108)
Size 0.133+ 0.133
(0.071) (0.086)
Age -0.119 0.279
(0.143) (0.187)
ROA 0.559 0.456
(0.394) (0.320)
Lev 0.432 0.398
(0.361) (0.392)
RD 2.306* 1.440+
(0.937) (0.829)
Pee 0.111 0.887+
(0.399) (0.461)
Inst -0.086 -0.046
(0.237) (0.226)
HHI -1.753 -2.101
(1.229) (1.537)
KZ -0.013 -0.034*
(0.019) (0.017)
CFC 0.503* 0.342+
(0.202) (0.177)
Capex 2.170** 1.385*
(0.704) (0.676)
NWC 0.475 0.494
(0.316) (0.342)
SOE -0.169 -0.047
(0.157) (0.153)
IMPIND 0.531 0.030
(0.359) (0.115)
High_Tech 0.005 -0.041
(0.045) (0.041)
factor(year)2013 0.076 -0.136*
(0.057) (0.067)
factor(year)2014 0.114 -0.174+
(0.082) (0.099)
factor(year)2015 0.151 -0.153
(0.099) (0.116)
factor(year)2016 0.100 -0.220
(0.115) (0.149)
factor(year)2017 0.082 -0.357*
(0.143) (0.163)
factor(year)2018 0.200 -0.465*
(0.151) (0.184)
factor(year)2019 0.199 -0.455*
(0.163) (0.200)
factor(year)2020 0.215 -0.427*
(0.171) (0.214)
factor(year)2021 0.043 -0.465*
(0.176) (0.231)
factor(year)2022 -0.266 -0.866***
(0.184) (0.243)
Num.Obs. 4422 4422
R2 0.045 0.052
R2 Adj. -0.057 -0.049
AIC 12926.2 11725.2
BIC 13098.9 11897.9
RMSE 1.04 0.91
Std.Errors Custom Custom
+ p < 0.1, * p < 0.05, ** p < 0.01, *** p < 0.001

Table 9: Robustness tests: DIDM

# Install and load required packages
library(DIDmultiplegt)
library(knitr)

# Arguments for did_multiplegt(): group (firm), time, treatment, controls.
# The original used `T` as a variable name, which masks R's TRUE shorthand;
# descriptive names avoid that foot-gun.
group_var <- "code"
time_var <- "year"
treatment_var <- "Treat_post"
control_vars <- c("Size", "Age", "ROA", "Lev", "RD",
                  "Pee", "Inst", "HHI", "KZ", "CFC",
                  "Capex", "NWC", "SOE")

# Dynamic ("dyn") DIDM estimator, robust to heterogeneous treatment effects,
# clustered at the firm level; one run per outcome.
did.mod1 <- DIDmultiplegt::did_multiplegt(mode = "dyn", data1, "Invention",
               group_var, time_var, treatment_var, controls = control_vars,
               cluster = group_var, graph_off = TRUE)

did.mod2 <- DIDmultiplegt::did_multiplegt(mode = "dyn", data1, "Non_Invention",
               group_var, time_var, treatment_var, controls = control_vars,
               cluster = group_var, graph_off = TRUE)


# Create the data for both models.
# The coefficients/SEs below are transcribed from the did_multiplegt() output
# above (the estimator's return object is not a standard model object, so the
# table is assembled by hand).

# Helper: fixed 5-decimal formatting, so trailing zeros are kept
# (0.12610 prints as "0.12610" rather than "0.1261" as before).
fmt5 <- function(x) formatC(x, format = "f", digits = 5)

did.mod1.info <- data.frame(
  Term = "Treat x Post",
  Coefficient = paste0(fmt5(0.24602), "**"),  # Two asterisks for model 1
  SE = paste0("(", fmt5(0.11511), ")"),
  Controls = "Yes",
  Firm = "Yes",
  DV = "Invention",
  N = 673
)

did.mod2.info <- data.frame(
  Term = "Treat x Post",
  Coefficient = paste0(fmt5(-0.31578), "***"),  # Three asterisks for model 2
  SE = paste0("(", fmt5(0.12610), ")"),
  Controls = "Yes",
  Firm = "Yes",
  DV = "NonInvention",
  N = 673
)

# Merge the two models side by side into a display table.
regression_table <- data.frame(
  Variable = c("Treat x Post", "SE", "Controls", "Firm", "Sample Size (N)"),
  Invention = c(did.mod1.info$Coefficient, did.mod1.info$SE, did.mod1.info$Controls, did.mod1.info$Firm, did.mod1.info$N),
  NonInvention = c(did.mod2.info$Coefficient, did.mod2.info$SE, did.mod2.info$Controls, did.mod2.info$Firm, did.mod2.info$N)
)

# Create the regression table using kable
kable(regression_table, caption = "DID with Multiple Groups and Periods", col.names = c("Variable", "Invention", "NonInvention"))
DID with Multiple Groups and Periods
Variable Invention NonInvention
Treat x Post 0.24602** -0.31578***
SE (0.11511) (0.1261)
Controls Yes Yes
Firm Yes Yes
Sample Size (N) 673 673
##N.B : These models can also be run using the Statamarkdown package with the Stata software installed on your device, since Stata provides a did_multiplegt command. The purpose of this replication, however, is to carry it out entirely in R.

Table 10 : Robustness tests: Poisson regression

# Poisson regression robustness check: re-estimate the baseline DiD as count
# models of patent output with year and firm fixed effects using fixest.
library(texreg)
## Version:  1.39.4
## Date:     2024-07-23
## Author:   Philip Leifeld (University of Manchester)
## 
## Consider submitting praise using the praise or praise_interactive functions.
## Please cite the JSS article in your publications -- see citation("texreg").
## 
## Attaching package: 'texreg'
## The following object is masked from 'package:tidyr':
## 
##     extract
library(fixest)
library(lmtest)
library(sandwich)

# fepois() drops fixed-effect groups whose outcome is always zero, which is
# why the reported sample sizes are smaller than the full panel of 4422.
model1_covid <- fepois(
  Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + 
  HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data1
)
## NOTE: 0/21 fixed-effects (231 observations) removed because of only 0 outcomes.
model2_covid <- fepois(
  Non_Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + 
  HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data1
)
## NOTE: 0/40 fixed-effects (440 observations) removed because of only 0 outcomes.
model_list <- list(
  model1_covid,
  model2_covid
)

# Generate the regression table. (The unused coefficient-name extractions that
# previously lived here were removed: the vectors were never referenced.)
screenreg(model_list, digits = 3, omit.coef = "as.factor", custom.note = "Standard errors are in parentheses. %stars") ## Don't worry about the sample size, some internal regression procedures eliminate some observations as needed.
## 
## ===============================================
##                    Model 1        Model 2      
## -----------------------------------------------
## Treat_post             0.216 ***     -0.199 ***
##                       (0.036)        (0.030)   
## Size                   0.108 ***      0.101 ** 
##                       (0.026)        (0.035)   
## Age                   -0.054          0.217 ** 
##                       (0.046)        (0.070)   
## ROA                    0.437          0.464    
##                       (0.243)        (0.316)   
## Lev                    0.302          0.238    
##                       (0.188)        (0.146)   
## RD                     1.596 *        1.155 ***
##                       (0.809)        (0.276)   
## Pee                    0.076          0.702 ***
##                       (0.335)        (0.202)   
## Inst                  -0.040         -0.027    
##                       (0.138)        (0.070)   
## HHI                   -1.108 *       -1.658 ***
##                       (0.478)        (0.472)   
## KZ                    -0.009         -0.025    
##                       (0.016)        (0.019)   
## CFC                    0.355 *        0.233 ***
##                       (0.149)        (0.059)   
## Capex                  1.363 ***      0.926 ***
##                       (0.371)        (0.215)   
## NWC                    0.342          0.294    
##                       (0.186)        (0.151)   
## SOE                   -0.089         -0.005    
##                       (0.065)        (0.078)   
## -----------------------------------------------
## Num. obs.           4191           3982        
## Num. groups: year     11             11        
## Num. groups: code    381            362        
## Deviance            3590.621       3195.995    
## Log Likelihood     -5943.824      -5478.203    
## Pseudo R^2             0.176          0.175    
## ===============================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05

Table 11: Robustness tests: excluding COVID-19

# Robustness: restrict the panel to the pre-COVID years (2012-2019).
data_tab11 <- data1 %>% filter(year %in% 2012:2019)

model1 <- feols(Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + 
                  HHI + KZ + CFC + Capex + NWC + SOE | year + code,
                  data = data_tab11)

model2 <- feols(Non_Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + 
                  HHI + KZ + CFC + Capex + NWC + SOE | year + code,
                  data = data_tab11)

# Compute firm-clustered standard errors
robust_se_model1 <- vcovCL(model1, cluster = ~ code)
robust_se_model2 <- vcovCL(model2, cluster = ~ code)

# Print the summary with robust standard errors
coeftest(model1, vcov = robust_se_model1)
coeftest(model2, vcov = robust_se_model2)

model_list <- list(model1, model2)
# Report the clustered SEs (and matching p-values, so stars are recomputed)
# in the table. Without override.se, screenreg would fall back to the models'
# default standard errors and silently contradict the coeftest output above.
screenreg(model_list, digits = 3, omit.coef = "as.factor",
          override.se = list(sqrt(diag(robust_se_model1)),
                             sqrt(diag(robust_se_model2))),
          override.pvalues = list(coeftest(model1, vcov = robust_se_model1)[, 4],
                                  coeftest(model2, vcov = robust_se_model2)[, 4]),
          custom.note = "Standard errors are in parentheses. %stars")
## 
## ===============================================
##                        Model 1      Model 2    
## -----------------------------------------------
## Treat_post                0.414 **    -0.320 **
##                          (0.081)      (0.061)  
## Size                      0.145 *      0.082   
##                          (0.058)      (0.069)  
## Age                      -0.016        0.363 * 
##                          (0.093)      (0.140)  
## ROA                       0.535        0.523   
##                          (0.289)      (0.487)  
## Lev                       0.264        0.284   
##                          (0.289)      (0.258)  
## RD                        4.234 **     1.889 **
##                          (0.840)      (0.514)  
## Pee                       0.680        1.129 **
##                          (0.415)      (0.305)  
## Inst                      0.007       -0.073   
##                          (0.174)      (0.169)  
## HHI                      -1.725       -4.743 **
##                          (0.807)      (0.902)  
## KZ                       -0.002       -0.036   
##                          (0.032)      (0.029)  
## CFC                       0.598        0.355 * 
##                          (0.275)      (0.111)  
## Capex                     1.588        1.128 * 
##                          (0.880)      (0.352)  
## NWC                       0.496 **     0.408   
##                          (0.135)      (0.207)  
## SOE                      -0.282 *     -0.104   
##                          (0.093)      (0.192)  
## -----------------------------------------------
## Num. obs.              3216         3216       
## Num. groups: year         8            8       
## Num. groups: code       402          402       
## R^2 (full model)          0.674        0.697   
## R^2 (proj model)          0.029        0.025   
## Adj. R^2 (full model)     0.625        0.651   
## Adj. R^2 (proj model)     0.025        0.020   
## ===============================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05

Table 12: Mechanism Analysis

# Mechanism analysis: each candidate channel (Tax, Subsidy, Financing,
# Collaboration, Talent) is regressed on the policy dummy, and Invention is
# then regressed on the policy dummy plus that channel. All ten models share
# the same controls and year/firm fixed effects, so the repeated boilerplate
# is factored into two small helpers. Every object name is unchanged.

mech_controls <- c("Size", "Age", "ROA", "Lev", "RD", "Pee", "Inst",
                   "HHI", "KZ", "CFC", "Capex", "NWC", "SOE")

# Fit a Gaussian feglm of `lhs` on Treat_post (+ optional mediator) and the
# shared controls, with year and firm fixed effects.
fit_mechanism <- function(lhs, mediator = NULL) {
  rhs <- paste(c("Treat_post", mediator, mech_controls), collapse = " + ")
  fml <- as.formula(paste0(lhs, " ~ ", rhs, " | year + code"))
  feglm(fml, data = data1, family = gaussian())
}

# Firm-clustered covariance matrix for a fitted model.
cluster_vcov <- function(model) vcovCL(model, cluster = ~ code)

#1. Model Tax
model_tax <- fit_mechanism("Tax")
robust_se_tax <- cluster_vcov(model_tax)

#2. Invention with Tax
model_invention_tax <- fit_mechanism("Invention", "Tax")
robust_se_invention_tax <- cluster_vcov(model_invention_tax)

#3. Subsidy
model_subsidy <- fit_mechanism("Subsidy")
robust_se_subsidy <- cluster_vcov(model_subsidy)

#4. Invention with Subsidy
model_invention_subsidy <- fit_mechanism("Invention", "Subsidy")
robust_se_invention_subsidy <- cluster_vcov(model_invention_subsidy)

#5. Financing
model_financing <- fit_mechanism("Financing")
robust_se_financing <- cluster_vcov(model_financing)

#6. Invention with Financing
model_invention_financing <- fit_mechanism("Invention", "Financing")
robust_se_invention_financing <- cluster_vcov(model_invention_financing)

#7. Collaboration
model_collaboration <- fit_mechanism("Collaboration")
robust_se_collaboration <- cluster_vcov(model_collaboration)

#8. Invention with Collaboration
model_invention_collaboration <- fit_mechanism("Invention", "Collaboration")
robust_se_invention_collaboration <- cluster_vcov(model_invention_collaboration)

#9. Talent
model_talent <- fit_mechanism("Talent")
robust_se_talent <- cluster_vcov(model_talent)

#10. Invention with Talent
model_invention_talent <- fit_mechanism("Invention", "Talent")
robust_se_invention_talent <- cluster_vcov(model_invention_talent)

# Create a list of models for Table 1
model_list1 <- list(
  model_tax,
  model_invention_tax
)

# Create a list of clustered covariance matrices for Table 1
robust_se_list1 <- list(
  robust_se_tax,
  robust_se_invention_tax
)

# Generate Table 1.
# NOTE: texreg expects SE *vectors* via `override.se` (plus `override.pvalues`
# so the stars are recomputed); `custom.se`, `include.gof`, `gof`, `label` and
# `caption` are not screenreg() arguments and were silently ignored before.
# `stars` is set explicitly to match the note's legend.
screenreg(
  model_list1,
  override.se = lapply(robust_se_list1, function(v) sqrt(diag(v))),
  override.pvalues = Map(function(m, v) coeftest(m, vcov = v)[, 4],
                         model_list1, robust_se_list1),
  stars = c(0.01, 0.05, 0.1),
  digits = 3,
  custom.note = "Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01",
  custom.model.names = c("Tax", "Invention with Tax")
)
## 
## ===================================================
##                    Tax           Invention with Tax
## ---------------------------------------------------
## Treat_post           -0.023 **       0.390 ***     
##                      (0.006)        (0.062)        
## Size                  0.002          0.135 **      
##                      (0.009)        (0.039)        
## Age                  -0.009         -0.101         
##                      (0.015)        (0.071)        
## ROA                   0.619 ***      0.679         
##                      (0.079)        (0.310)        
## Lev                  -0.014          0.415         
##                      (0.060)        (0.257)        
## RD                   -0.032          2.338         
##                      (0.163)        (1.380)        
## Pee                   0.068          0.102         
##                      (0.069)        (0.454)        
## Inst                 -0.086         -0.103         
##                      (0.044)        (0.178)        
## HHI                   0.247         -1.587         
##                      (0.124)        (0.806)        
## KZ                    0.004         -0.013         
##                      (0.003)        (0.025)        
## CFC                  -0.073          0.492 *       
##                      (0.044)        (0.217)        
## Capex                -0.227 *        2.096 **      
##                      (0.095)        (0.553)        
## NWC                  -0.036          0.445         
##                      (0.046)        (0.266)        
## SOE                   0.012         -0.166         
##                      (0.021)        (0.118)        
## Tax                                 -0.238         
##                                     (0.144)        
## ---------------------------------------------------
## Num. obs.          4422           4422             
## Num. groups: year    11             11             
## Num. groups: code   402            402             
## Deviance            127.810       4760.790         
## Log Likelihood     1559.798      -6438.766         
## Pseudo R^2           -0.101          0.197         
## ===================================================
## Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01
# Create a list of models for Table 2
model_list2 <- list(
  model_subsidy,
  model_invention_subsidy
)

# Create a list of clustered covariance matrices for Table 2
robust_se_list2 <- list(
  robust_se_subsidy,
  robust_se_invention_subsidy
)

# Generate Table 2.
# NOTE: texreg expects SE *vectors* via `override.se` (plus `override.pvalues`
# so the stars are recomputed); `custom.se`, `include.gof`, `gof`, `label` and
# `caption` are not screenreg() arguments and were silently ignored before.
# `stars` is set explicitly to match the note's legend.
screenreg(
  model_list2,
  override.se = lapply(robust_se_list2, function(v) sqrt(diag(v))),
  override.pvalues = Map(function(m, v) coeftest(m, vcov = v)[, 4],
                         model_list2, robust_se_list2),
  stars = c(0.01, 0.05, 0.1),
  digits = 3,
  custom.note = "Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01",
  custom.model.names = c("Subsidy", "Invention with Subsidy")
)
## 
## ========================================================
##                    Subsidy        Invention with Subsidy
## --------------------------------------------------------
## Treat_post             0.002 *        0.379 ***         
##                       (0.001)        (0.060)            
## Size                  -0.001 *        0.140 **          
##                       (0.000)        (0.039)            
## Age                    0.002 ***     -0.122             
##                       (0.000)        (0.075)            
## ROA                    0.003          0.502             
##                       (0.003)        (0.317)            
## Lev                    0.004 *        0.385             
##                       (0.001)        (0.263)            
## RD                     0.008          2.267             
##                       (0.005)        (1.407)            
## Pee                    0.008 **       0.010             
##                       (0.002)        (0.461)            
## Inst                  -0.000         -0.081             
##                       (0.001)        (0.180)            
## HHI                   -0.013 *       -1.523             
##                       (0.005)        (0.846)            
## KZ                     0.000         -0.014             
##                       (0.000)        (0.026)            
## CFC                    0.002          0.494             
##                       (0.001)        (0.225)            
## Capex                  0.009 *        2.066 **          
##                       (0.003)        (0.558)            
## NWC                    0.004 **       0.413             
##                       (0.001)        (0.282)            
## SOE                   -0.001         -0.157             
##                       (0.001)        (0.116)            
## Subsidy                               9.502 **          
##                                      (2.990)            
## --------------------------------------------------------
## Num. obs.           4422           4422                 
## Num. groups: year     11             11                 
## Num. groups: code    402            402                 
## Deviance               0.143       4755.122             
## Log Likelihood     16589.906      -6436.132             
## Pseudo R^2            -0.014          0.197             
## ========================================================
## Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01
# Create a list of models for Table 3a
model_list3a <- list(
  model_financing,
  model_invention_financing,
  model_collaboration,
  model_invention_collaboration
)

# Create a list of clustered covariance matrices for Table 3a
robust_se_list3a <- list(
  robust_se_financing,
  robust_se_invention_financing,
  robust_se_collaboration,
  robust_se_invention_collaboration
)

# Generate Table 3a.
# NOTE: texreg expects SE *vectors* via `override.se` (plus `override.pvalues`
# so the stars are recomputed); `custom.se`, `include.gof`, `gof`, `label` and
# `caption` are not screenreg() arguments and were silently ignored before.
# `stars` is set explicitly to match the note's legend.
screenreg(
  model_list3a,
  override.se = lapply(robust_se_list3a, function(v) sqrt(diag(v))),
  override.pvalues = Map(function(m, v) coeftest(m, vcov = v)[, 4],
                         model_list3a, robust_se_list3a),
  stars = c(0.01, 0.05, 0.1),
  digits = 3,
  custom.note = "Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01",
  custom.model.names = c("Financing", "Invention with Financing", "Collaboration", "Invention with Collaboration")
)
## 
## ======================================================================================================
##                    Financing     Invention with Financing  Collaboration  Invention with Collaboration
## ------------------------------------------------------------------------------------------------------
## Treat_post            0.013 **       0.373 ***                0.081 ***       0.372 ***               
##                      (0.003)        (0.067)                  (0.016)         (0.062)                  
## Size                 -0.016 **       0.163 **                 0.041 ***       0.123 **                
##                      (0.004)        (0.040)                  (0.008)         (0.038)                  
## Age                  -0.009         -0.083                    0.021          -0.105                   
##                      (0.005)        (0.073)                  (0.013)         (0.071)                  
## ROA                  -0.028          0.581                    0.026           0.525                   
##                      (0.020)        (0.323)                  (0.075)         (0.319)                  
## Lev                   0.132 ***      0.189                   -0.169 *         0.467                   
##                      (0.019)        (0.268)                  (0.060)         (0.247)                  
## RD                    0.002          2.343                    0.141           2.306                   
##                      (0.047)        (1.325)                  (0.232)         (1.362)                  
## Pee                  -0.091 **       0.245                   -0.067           0.105                   
##                      (0.023)        (0.457)                  (0.082)         (0.455)                  
## Inst                 -0.016         -0.055                   -0.138 **       -0.043                   
##                      (0.009)        (0.192)                  (0.035)         (0.174)                  
## HHI                  -0.078 *       -1.510                    0.150          -1.689                   
##                      (0.032)        (0.833)                  (0.136)         (0.843)                  
## KZ                    0.003 *       -0.019                   -0.002          -0.014                   
##                      (0.001)        (0.027)                  (0.004)         (0.026)                  
## CFC                  -0.007          0.521 *                 -0.041           0.521 *                 
##                      (0.010)        (0.218)                  (0.038)         (0.222)                  
## Capex                -0.093 **       2.312 **                 0.020           2.144 **                
##                      (0.024)        (0.583)                  (0.078)         (0.561)                  
## NWC                  -0.126 ***      0.672                   -0.109           0.484                   
##                      (0.013)        (0.306)                  (0.066)         (0.270)                  
## SOE                  -0.006         -0.158                   -0.032 *        -0.160                   
##                      (0.006)        (0.125)                  (0.011)         (0.121)                  
## Financing                            1.738 *                                                          
##                                     (0.592)                                                           
## Collaboration                                                                 0.284 *                 
##                                                                              (0.110)                  
## ------------------------------------------------------------------------------------------------------
## Num. obs.          4422           4422                     4422            4422                       
## Num. groups: year    11             11                       11              11                       
## Num. groups: code   402            402                      402             402                       
## Deviance             13.745       4726.475                  206.122        4751.358                   
## Log Likelihood     6490.047      -6422.772                  503.113       -6434.381                   
## Pseudo R^2           -0.381          0.199                    1.030           0.198                   
## ======================================================================================================
## Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01
# Create a list of models for Table 3b
model_list3b <- list(
  model_talent,
  model_invention_talent
)

# Create a list of clustered covariance matrices for Table 3b
robust_se_list3b <- list(
  robust_se_talent,
  robust_se_invention_talent
)

# Generate Table 3b.
# NOTE: texreg expects SE *vectors* via `override.se` (plus `override.pvalues`
# so the stars are recomputed); `custom.se`, `include.gof`, `gof`, `label` and
# `caption` are not screenreg() arguments and were silently ignored before.
# `stars` is set explicitly to match the note's legend.
screenreg(
  model_list3b,
  override.se = lapply(robust_se_list3b, function(v) sqrt(diag(v))),
  override.pvalues = Map(function(m, v) coeftest(m, vcov = v)[, 4],
                         model_list3b, robust_se_list3b),
  stars = c(0.01, 0.05, 0.1),
  digits = 3,
  custom.note = "Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01",
  custom.model.names = c("Talent", "Invention with Talent")
)
## 
## ======================================================
##                    Talent        Invention with Talent
## ------------------------------------------------------
## Treat_post            0.017 ***      0.382 ***        
##                      (0.002)        (0.060)           
## Size                  0.003          0.132 **         
##                      (0.002)        (0.040)           
## Age                   0.030 **      -0.121            
##                      (0.008)        (0.071)           
## ROA                  -0.033          0.557            
##                      (0.032)        (0.316)           
## Lev                  -0.013          0.428            
##                      (0.017)        (0.257)           
## RD                    0.126          2.254            
##                      (0.079)        (1.373)           
## Pee                  -0.083 **       0.147            
##                      (0.025)        (0.447)           
## Inst                 -0.022         -0.066            
##                      (0.014)        (0.176)           
## HHI                   0.037         -1.673            
##                      (0.063)        (0.821)           
## KZ                    0.001         -0.015            
##                      (0.001)        (0.025)           
## CFC                  -0.028          0.529 *          
##                      (0.019)        (0.225)           
## Capex                 0.006          2.145 **         
##                      (0.037)        (0.562)           
## NWC                  -0.051 *        0.490            
##                      (0.021)        (0.286)           
## SOE                   0.003         -0.171            
##                      (0.005)        (0.119)           
## Talent                               0.728 *          
##                                     (0.260)           
## ------------------------------------------------------
## Num. obs.          4422           4422                
## Num. groups: year    11             11                
## Num. groups: code   402            402                
## Deviance             17.763       4758.592            
## Log Likelihood     5922.999      -6437.745            
## Pseudo R^2           -0.784          0.197            
## ======================================================
## Standard errors are in parentheses. Significance stars: * p < 0.1, ** p < 0.05, *** p < 0.01

Table 13 :

# Data filtering and estimation of subsample (heterogeneity) models:
# the baseline specification is re-estimated on subgroups defined by city
# tier (fcity), high-tech status (fhtce), ownership (fsoe) and size (fsize).
library(fixest)
library(dplyr)

# Shared baseline specification with year and firm fixed effects.
base_fml <- Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee +
  Inst + HHI + KZ + CFC + Capex + NWC + SOE | year + code

# The ownership subsamples split on SOE status, so the SOE control is
# dropped there (it would be collinear within each subsample).
fml_no_soe <- Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee +
  Inst + HHI + KZ + CFC + Capex + NWC | year + code

# Models by city tier
model_fcity1 <- feols(base_fml, data = data1 %>% filter(fcity == 1))
model_fcity2 <- feols(base_fml, data = data1 %>% filter(fcity == 2))
model_fcity3 <- feols(base_fml, data = data1 %>% filter(fcity == 3))

# Models by high-tech status
model_fhtce1 <- feols(base_fml, data = data1 %>% filter(fhtce == 1))
model_fhtce0 <- feols(base_fml, data = data1 %>% filter(fhtce == 0))

# Models by ownership (SOE control dropped, see above)
model_fsoe1 <- feols(fml_no_soe, data = data1 %>% filter(fsoe == 1))
model_fsoe0 <- feols(fml_no_soe, data = data1 %>% filter(fsoe == 0))

# Model for large firms (fsize == 1)
model_fsize1 <- feols(base_fml, data = data1 %>% filter(fsize == 1))

# Model for fsize == 0
model_fsize0 <- feols(Invention ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE | year + code, 
                      data = data1 %>% filter(fsize == 0))
# (Fix: a third redundant library(fixest) call was removed; fixest is
# already attached above. Only texreg is new at this point.)
library(texreg)

# Named list of all nine subsample models.
# NOTE(review): this list is not used by the screenreg calls below, which
# take model_list1-model_list4 instead; kept for reference/inspection.
models <- list(
  fcity1 = model_fcity1,
  fcity2 = model_fcity2,
  fcity3 = model_fcity3,
  fhtce1 = model_fhtce1,
  fhtce0 = model_fhtce0,
  fsoe1 = model_fsoe1,
  fsoe0 = model_fsoe0,
  fsize1 = model_fsize1,
  fsize0 = model_fsize0
)

# Model groupings for the four screenreg panels of Table 13
model_list1 <- list(model_fcity1, model_fcity2)                 # Eastern / Middle China
model_list2 <- list(model_fcity3, model_fhtce1, model_fhtce0)   # Western China, high-tech split
model_list3 <- list(model_fsoe1, model_fsoe0)                   # SOE / non-SOE
model_list4 <- list(model_fsize1, model_fsize0)                 # large / small firms

# Render the regression tables as plain-text panels with screenreg
library(texreg)

# Table 13, panel A: regional heterogeneity (Eastern vs. Middle China)
screenreg(
  model_list1,
  custom.model.names = c("Eastern China", "Middle China"),
  custom.note = "Standard errors are in parentheses. %stars",
  omit.coef = "as.factor",
  digits = 3
)
## 
## ==================================================
##                        Eastern China  Middle China
## --------------------------------------------------
## Treat_post                0.394 **       0.340 *  
##                          (0.089)        (0.124)   
## Size                      0.141 *        0.160    
##                          (0.047)        (0.126)   
## Age                       0.097         -0.377    
##                          (0.127)        (0.183)   
## ROA                       0.107          1.678 *  
##                          (0.356)        (0.691)   
## Lev                       0.082          1.178 *  
##                          (0.318)        (0.464)   
## RD                        2.054          5.591 ** 
##                          (1.703)        (1.607)   
## Pee                       0.351          0.047    
##                          (0.409)        (0.807)   
## Inst                      0.116         -0.380    
##                          (0.194)        (0.372)   
## HHI                      -3.303          0.591    
##                          (1.591)        (1.698)   
## KZ                       -0.012         -0.009    
##                          (0.033)        (0.023)   
## CFC                       0.631 *        0.018    
##                          (0.279)        (0.250)   
## Capex                     2.069 **       2.451    
##                          (0.576)        (1.233)   
## NWC                       0.351          0.517    
##                          (0.202)        (0.718)   
## SOE                      -0.316         -0.035    
##                          (0.144)        (0.307)   
## --------------------------------------------------
## Num. obs.              2601           1323        
## Num. groups: year        11             11        
## Num. groups: code       238            126        
## R^2 (full model)          0.585          0.676    
## R^2 (proj model)          0.023          0.031    
## Adj. R^2 (full model)     0.539          0.635    
## Adj. R^2 (proj model)     0.017          0.020    
## ==================================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05
# Table 13, panel B: Western China plus high-tech vs. non-high-tech firms
screenreg(
  model_list2,
  custom.model.names = c("Western China", "High Tech", "Non High-Tech"),
  custom.note = "Standard errors are in parentheses. %stars",
  omit.coef = "as.factor",
  digits = 3
)
## 
## =================================================================
##                        Western China  High Tech     Non High-Tech
## -----------------------------------------------------------------
## Treat_post               0.760 **        0.432 ***     0.302 **  
##                         (0.195)         (0.086)       (0.076)    
## Size                     0.176           0.204 *       0.013     
##                         (0.141)         (0.068)       (0.071)    
## Age                     -0.400          -0.067        -0.187     
##                         (0.250)         (0.092)       (0.099)    
## ROA                     -0.929           0.840        -0.036     
##                         (0.958)         (0.517)       (0.593)    
## Lev                      0.212           1.069 *      -0.575     
##                         (0.690)         (0.369)       (0.313)    
## RD                      -1.044           2.826         2.169     
##                         (1.409)         (1.317)       (1.633)    
## Pee                     -1.352           0.531        -0.646     
##                         (1.144)         (0.580)       (0.510)    
## Inst                     0.202           0.039        -0.339     
##                         (0.361)         (0.221)       (0.222)    
## HHI                      0.110          -1.462        -1.376     
##                         (1.623)         (1.230)       (1.178)    
## KZ                      -0.059          -0.020        -0.008     
##                         (0.047)         (0.029)       (0.028)    
## CFC                      0.854           0.505         0.531 *   
##                         (0.604)         (0.278)       (0.203)    
## Capex                    2.201           2.901 **      0.626     
##                         (1.562)         (0.639)       (1.206)    
## NWC                      0.825           0.835 *      -0.135     
##                         (1.018)         (0.329)       (0.360)    
## SOE                      0.420 *        -0.246        -0.043     
##                         (0.141)         (0.172)       (0.158)    
## -----------------------------------------------------------------
## Num. obs.              498            3124          1298         
## Num. groups: year       11              11            11         
## Num. groups: code       46             284           118         
## R^2 (full model)         0.608           0.594         0.660     
## R^2 (proj model)         0.069           0.028         0.020     
## Adj. R^2 (full model)    0.545           0.549         0.618     
## Adj. R^2 (proj model)    0.038           0.024         0.008     
## =================================================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05
# Table 13, panel C: state-owned vs. non-state-owned enterprises
screenreg(
  model_list3,
  custom.model.names = c("SOE", "Non-SOE"),
  custom.note = "Standard errors are in parentheses. %stars",
  omit.coef = "as.factor",
  digits = 3
)
## 
## =================================================
##                        SOE           Non-SOE     
## -------------------------------------------------
## Treat_post                0.596 ***     0.337 ** 
##                          (0.090)       (0.076)   
## Size                      0.048         0.166 ** 
##                          (0.091)       (0.046)   
## Age                      -0.712 **      0.095    
##                          (0.169)       (0.090)   
## ROA                       0.975         0.484    
##                          (0.848)       (0.379)   
## Lev                       0.465         0.350    
##                          (0.537)       (0.315)   
## RD                        2.287         2.167    
##                          (2.356)       (1.330)   
## Pee                       0.006         0.055    
##                          (0.584)       (0.529)   
## Inst                     -0.470        -0.045    
##                          (0.481)       (0.176)   
## HHI                      -2.953 *      -0.899    
##                          (1.260)       (0.816)   
## KZ                       -0.064         0.001    
##                          (0.034)       (0.025)   
## CFC                      -0.076         0.606    
##                          (0.550)       (0.273)   
## Capex                     2.579         2.118 ***
##                          (1.706)       (0.431)   
## NWC                      -0.287         0.624 *  
##                          (0.771)       (0.225)   
## -------------------------------------------------
## Num. obs.              1265          3157        
## Num. groups: year        11            11        
## Num. groups: code       115           287        
## R^2 (full model)          0.725         0.538    
## R^2 (proj model)          0.043         0.018    
## Adj. R^2 (full model)     0.691         0.488    
## Adj. R^2 (proj model)     0.032         0.014    
## =================================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05
# Table 13, panel D: large vs. small firms (median split; both n = 2211)
screenreg(
  model_list4,
  custom.model.names = c("Large Size", "Small Size"),
  custom.note = "Standard errors are in parentheses. %stars",
  omit.coef = "as.factor",
  digits = 3
)
## 
## ================================================
##                        Large Size    Small Size 
## ------------------------------------------------
## Treat_post                0.391 ***     0.439 **
##                          (0.072)       (0.098)  
## Size                      0.159         0.160   
##                          (0.083)       (0.078)  
## Age                      -0.278 *      -0.151   
##                          (0.105)       (0.145)  
## ROA                       0.976         0.207   
##                          (0.743)       (0.453)  
## Lev                       0.282         0.437   
##                          (0.395)       (0.376)  
## RD                        0.717         3.166 * 
##                          (2.183)       (1.105)  
## Pee                      -0.550         0.901   
##                          (0.504)       (0.677)  
## Inst                      0.058        -0.225   
##                          (0.302)       (0.208)  
## HHI                      -3.392 *       0.650   
##                          (1.481)       (1.573)  
## KZ                       -0.017        -0.016   
##                          (0.033)       (0.028)  
## CFC                       0.434         0.548   
##                          (0.270)       (0.322)  
## Capex                     2.049         1.822   
##                          (1.070)       (1.032)  
## NWC                       0.093         0.576   
##                          (0.357)       (0.415)  
## SOE                       0.056        -0.307   
##                          (0.170)       (0.156)  
## ------------------------------------------------
## Num. obs.              2211          2211       
## Num. groups: year        11            11       
## Num. groups: code       201           201       
## R^2 (full model)          0.681         0.457   
## R^2 (proj model)          0.027         0.026   
## Adj. R^2 (full model)     0.645         0.395   
## Adj. R^2 (proj model)     0.021         0.019   
## ================================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05

Table 14: Economic performance analysis: Tobin’s Q and TFP

# Table 14: economic performance (TQ and TFP_LP), estimated with and
# without Invention as an additional regressor to gauge whether innovation
# output channels the policy effect into firm performance.

# Model 1: Tobin's Q on the DiD term plus firm controls, year and firm FE
model_TQ <- feols(
  TQ ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ +
    CFC + Capex + NWC + SOE | year + code,
  data = data1
)

# Model 2: Tobin's Q, adding Invention as an explanatory variable
model_TQ_invention <- feols(
  TQ ~ Treat_post + Invention + Size + Age + ROA + Lev + RD + Pee + Inst +
    HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data1
)

# Model 3: TFP_LP as dependent variable, same specification
model_TFP_LP <- feols(
  TFP_LP ~ Treat_post + Size + Age + ROA + Lev + RD + Pee + Inst + HHI +
    KZ + CFC + Capex + NWC + SOE | year + code,
  data = data1
)

# Model 4: TFP_LP, adding Invention as an explanatory variable
model_TFP_LP_invention <- feols(
  TFP_LP ~ Treat_post + Invention + Size + Age + ROA + Lev + RD + Pee +
    Inst + HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data1
)

# Collect the four columns in display order
models <- list(
  TQ = model_TQ,
  TQ_Invention = model_TQ_invention,
  TFP_LP = model_TFP_LP,
  TFP_LP_Invention = model_TFP_LP_invention
)

# Render Table 14 as a plain-text table
screenreg(
  models,
  custom.model.names = c("TQ", "TQ with Invention", "TFP_LP", "TFP_LP with Invention"),
  custom.note = "Standard errors are in parentheses. %stars",
  omit.coef = "as.factor",
  digits = 3
)
## 
## ===========================================================================================
##                        TQ            TQ with Invention  TFP_LP        TFP_LP with Invention
## -------------------------------------------------------------------------------------------
## Treat_post                0.203 **      0.171 **           0.110 ***     0.091 **          
##                          (0.053)       (0.043)            (0.023)       (0.023)            
## Size                     -0.521 ***    -0.532 ***          0.417 ***     0.410 ***         
##                          (0.079)       (0.076)            (0.028)       (0.028)            
## Age                       0.337 *       0.345 *            0.150 ***     0.155 ***         
##                          (0.126)       (0.123)            (0.028)       (0.026)            
## ROA                       2.333 **      2.290 **           0.808 **      0.783 **          
##                          (0.592)       (0.591)            (0.212)       (0.205)            
## Lev                       0.194         0.161              0.633 ***     0.613 ***         
##                          (0.245)       (0.234)            (0.126)       (0.121)            
## RD                       -3.086 **     -3.276 **          -3.366 ***    -3.477 ***         
##                          (0.707)       (0.722)            (0.368)       (0.360)            
## Pee                       0.153         0.147             -1.159 ***    -1.163 ***         
##                          (0.283)       (0.303)            (0.185)       (0.180)            
## Inst                      1.706 ***     1.713 ***          0.363 **      0.366 **          
##                          (0.201)       (0.200)            (0.090)       (0.095)            
## HHI                       0.070         0.203              3.145 ***     3.224 ***         
##                          (1.031)       (1.064)            (0.475)       (0.468)            
## KZ                        0.187 ***     0.188 ***          0.009         0.009             
##                          (0.014)       (0.015)            (0.008)       (0.008)            
## CFC                      -0.030        -0.071              0.130         0.106             
##                          (0.103)       (0.103)            (0.086)       (0.087)            
## Capex                     1.842 **      1.668 **           0.180         0.077             
##                          (0.423)       (0.445)            (0.249)       (0.238)            
## NWC                       0.555 **      0.518 **           0.460 **      0.439 **          
##                          (0.137)       (0.127)            (0.109)       (0.105)            
## SOE                      -0.223 *      -0.210 *           -0.051        -0.043             
##                          (0.083)       (0.084)            (0.050)       (0.047)            
## Invention                               0.081                            0.048 **          
##                                        (0.037)                          (0.014)            
## -------------------------------------------------------------------------------------------
## Num. obs.              4422          4422               4422          4422                 
## Num. groups: year        11            11                 11            11                 
## Num. groups: code       402           402                402           402                 
## R^2 (full model)          0.634         0.637              0.824         0.827             
## R^2 (proj model)          0.139         0.148              0.232         0.243             
## Adj. R^2 (full model)     0.595         0.599              0.805         0.808             
## Adj. R^2 (proj model)     0.136         0.145              0.229         0.241             
## ===========================================================================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05

Table 15: Economic performance analysis: spillover effect

# Getting data for the spillover analysis (firms bordering pilot areas)
path3 <- file.path("C:\\Users\\hassa\\OneDrive\\Documents\\R\\Datasets\\data3.dta")
data3 <- read_dta(path3)

# Estimating models: invention output regressed on the border-exposure DiD
# term (Border_Post) with the usual controls and year + firm fixed effects.
# NOTE(review): Inventiont1-Inventiont3 presumably denote t+1..t+3 leads of
# Inventiont - confirm against the data dictionary.

# Model 1: Inventiont as dependent variable
model_Inventiont <- feols(
  Inventiont ~ Border_Post + Size + Age + ROA + Lev + RD + Pee + Inst +
    HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data3
)

# Model 2: Inventiont1 as dependent variable
model_Inventiont1 <- feols(
  Inventiont1 ~ Border_Post + Size + Age + ROA + Lev + RD + Pee + Inst +
    HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data3
)

# Model 3: Inventiont2 as dependent variable
model_Inventiont2 <- feols(
  Inventiont2 ~ Border_Post + Size + Age + ROA + Lev + RD + Pee + Inst +
    HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data3
)

# Model 4: Inventiont3 as dependent variable
model_Inventiont3 <- feols(
  Inventiont3 ~ Border_Post + Size + Age + ROA + Lev + RD + Pee + Inst +
    HHI + KZ + CFC + Capex + NWC + SOE | year + code,
  data = data3
)

# Collect the four columns in display order
models <- list(
  Inventiont = model_Inventiont,
  Inventiont1 = model_Inventiont1,
  Inventiont2 = model_Inventiont2,
  Inventiont3 = model_Inventiont3
)

# Render Table 15 as a plain-text table
screenreg(
  models,
  custom.model.names = c("Inventiont", "Inventiont1", "Inventiont2", "Inventiont3"),
  custom.note = "Standard errors are in parentheses. %stars",
  omit.coef = "as.factor",
  digits = 3
)
## 
## ===========================================================================
##                        Inventiont    Inventiont1  Inventiont2   Inventiont3
## ---------------------------------------------------------------------------
## Border_Post               0.102         0.117        0.187 *       0.252 **
##                          (0.063)       (0.053)      (0.075)       (0.066)  
## Size                      0.172 ***    -0.035       -0.126        -0.253 * 
##                          (0.033)       (0.132)      (0.103)       (0.087)  
## Age                      -0.175        -0.217 *     -0.116        -0.301 **
##                          (0.095)       (0.080)      (0.060)       (0.094)  
## ROA                       0.182         0.365       -0.069         0.377   
##                          (0.326)       (0.399)      (0.345)       (0.386)  
## Lev                      -0.004         0.096        0.180         0.105   
##                          (0.170)       (0.181)      (0.231)       (0.242)  
## RD                        0.873        -0.590       -0.600         0.237   
##                          (0.962)       (0.607)      (0.826)       (1.043)  
## Pee                       0.270        -0.130        0.298         0.445   
##                          (0.193)       (0.299)      (0.284)       (0.323)  
## Inst                      0.217         0.452        0.207         0.204   
##                          (0.148)       (0.209)      (0.231)       (0.187)  
## HHI                      -2.263 **     -1.702       -2.277 ***    -0.197   
##                          (0.650)       (0.789)      (0.458)       (0.967)  
## KZ                       -0.005         0.005        0.006        -0.020   
##                          (0.011)       (0.014)      (0.018)       (0.018)  
## CFC                       0.189         0.093        0.106         0.032   
##                          (0.101)       (0.190)      (0.203)       (0.207)  
## Capex                     1.410 *       0.176       -0.166        -1.383   
##                          (0.495)       (0.556)      (0.912)       (0.967)  
## NWC                       0.048         0.077        0.337        -0.128   
##                          (0.121)       (0.174)      (0.157)       (0.228)  
## SOE                       0.022         0.067        0.239 *       0.131   
##                          (0.083)       (0.078)      (0.105)       (0.089)  
## ---------------------------------------------------------------------------
## Num. obs.              3454          3454         3454          3454       
## Num. groups: year        11            11           11            11       
## Num. groups: code       314           314          314           314       
## R^2 (full model)          0.729         0.632        0.541         0.507   
## R^2 (proj model)          0.019         0.009        0.009         0.015   
## Adj. R^2 (full model)     0.700         0.592        0.492         0.454   
## Adj. R^2 (proj model)     0.014         0.004        0.005         0.011   
## ===========================================================================
## Standard errors are in parentheses. *** p < 0.001; ** p < 0.01; * p < 0.05

Figure: Propensity score densities before and after matching

# Corresponding data acquisition for the PSM balance figures
library(plotly)

path4 <- file.path("C:\\Users\\hassa\\OneDrive\\Documents\\R\\Datasets\\data4.dta")
data4 <- read_dta(path4)

# Stata's matching output names start with "_", which is awkward to work
# with in R; rename both columns in a single call.
data4 <- data4 %>%
  rename(pscore = "_pscore", weight = "_weight")


# Density-normalized histogram of propensity scores before matching
# (all observations, by treatment group)
plot_before <- data4 %>%
  plot_ly(
    x = ~pscore,
    color = ~factor(treat),
    type = "histogram",
    histnorm = "density",
    opacity = 0.7
  ) %>%
  layout(
    title = "Before",
    xaxis = list(title = "pscore"),
    yaxis = list(title = "Density"),
    legend = list(title = list(text = "Group"))
  )

# Same plot after matching: rows with a missing matching weight were not
# matched and are dropped
plot_after <- data4 %>%
  filter(!is.na(weight)) %>%
  plot_ly(
    x = ~pscore,
    color = ~factor(treat),
    type = "histogram",
    histnorm = "density",
    opacity = 0.7
  ) %>%
  layout(
    title = "After",
    xaxis = list(title = "pscore"),
    yaxis = list(title = "Density"),
    legend = list(title = list(text = "Group"))
  )

# Combine the before/after panels side by side
subplot(plot_before, plot_after, nrows = 1, margin = 0.05)
## Warning in RColorBrewer::brewer.pal(N, "Set2"): minimal value for n is 3, returning requested palette with 3 different levels
## Warning in RColorBrewer::brewer.pal(N, "Set2"): minimal value for n is 3, returning requested palette with 3 different levels
## Warning in RColorBrewer::brewer.pal(N, "Set2"): minimal value for n is 3, returning requested palette with 3 different levels
## Warning in RColorBrewer::brewer.pal(N, "Set2"): minimal value for n is 3, returning requested palette with 3 different levels

Figure: Parallel trend tests

library(broom)

path1 <- file.path("C:\\Users\\hassa\\OneDrive\\Documents\\R\\Datasets\\data1.dta")
data1 <- read_dta(path1)

# Event-time terms in chronological order. Fix: without explicit factor
# levels, ggplot2 orders the character `term` column alphabetically
# (After1..After3, then Before2..Before4, then Current), which scrambles
# the x-axis of the parallel-trend plot and the connecting line.
event_terms <- c("Treat_Before4", "Treat_Before3", "Treat_Before2",
                 "Treat_Current", "Treat_After1", "Treat_After2", "Treat_After3")

# Run the event-study regressions for both patent outcomes
model_invention <- feols(Invention ~ Treat_Before4 + Treat_Before3 + Treat_Before2 + Treat_Current + Treat_After1 + Treat_After2 + Treat_After3 + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE | year + code, data = data1)
model_non_invention <- feols(Non_Invention ~ Treat_Before4 + Treat_Before3 + Treat_Before2 + Treat_Current + Treat_After1 + Treat_After2 + Treat_After3 + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE | year + code, data = data1)

# Tidy model outputs, keeping only the event-time coefficients and pinning
# their chronological display order
tidy_invention <- tidy(model_invention, conf.int = TRUE) %>%
  filter(term %in% event_terms) %>%
  mutate(term = factor(term, levels = event_terms))

tidy_non_invention <- tidy(model_non_invention, conf.int = TRUE) %>%
  filter(term %in% event_terms) %>%
  mutate(term = factor(term, levels = event_terms))

# Plot Invention coefficients with 95% confidence bars around zero
plot_invention <- ggplot(tidy_invention, aes(x = term, y = estimate)) +
  geom_point(color = "blue") +
  geom_line(aes(group = 1), color = "blue") +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2, color = "blue") +
  geom_hline(yintercept = 0, linetype = "dashed", color = "red") +
  theme_minimal() +
  labs(title = "Regression Coefficients for Invention",
       x = "Year of MIC 2025",
       y = "Coefficient") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Plot Non-Invention coefficients
plot_noninvention <- ggplot(tidy_non_invention, aes(x = term, y = estimate)) +
  geom_point(color = "green") +
  geom_line(aes(group = 1), color = "green") +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2, color = "green") +
  geom_hline(yintercept = 0, linetype = "dashed", color = "red") +
  theme_minimal() +
  labs(title = "Regression Coefficients for Non_Invention",
       x = "Year of MIC 2025",
       y = "Coefficient") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Combine the two panels (plotly::subplot converts the ggplot objects)
subplot(plot_invention, plot_noninvention, nrows = 1, margin = 0.05)

Figure : Placebo tests

# Placebo test: randomly reassign "treated" status 1,000 times and re-run
# the DiD regressions; if the estimated effect is causal, the placebo
# coefficients should be centered on zero.
# NOTE(review): no set.seed() is called, so the 1,000 draws are not
# reproducible across runs - consider seeding before the loop.
library(tidyverse)
library(broom)

# Getting data (Stata export prepared for the placebo exercise)
path5<-file.path("C:\\Users\\hassa\\OneDrive\\Documents\\R\\Datasets\\data5.dta")
data5<-read_dta(path5)



# Preallocate result holders, one slot per placebo draw
placebo_results_invention <- vector("list", 1000)
placebo_results_non_invention <- vector("list", 1000)

# Loop through 1000 placebo draws
for (i in 1:1000) {
  cat("Processing iteration:", i, "\n")
  
  # Shuffle rows and flag the first 1337 as placebo-treated.
  # NOTE(review): 1337 presumably matches the treated count in the real
  # sample - confirm. Assignment is per observation (row), not per firm;
  # a firm-level placebo would assign by `code`. Verify against the
  # original Stata code.
  data <- data5 %>%
    mutate(obs_id = row_number(),
           random_digit = runif(n())) %>%
    arrange(random_digit) %>%
    mutate(random_id = row_number(),
           placebohsrfirm = ifelse(random_id <= 1337, 1, 0),
           fake = placebohsrfirm * post)

  # Keep only the shuffled assignment, keyed by shuffled position
  data_placebo <- data %>%
    select(random_id, placebohsrfirm) %>%
    rename(random_placebohsrfirm = placebohsrfirm,
           id = random_id)
  
  # Original rows, keyed by the pre-shuffle row number
  data_raw <- data %>%
    select(-random_digit, -random_id) %>%
    rename(id = obs_id)
  
  # Joining on id maps the shuffled assignment back onto the original rows
  # (effectively a second, independent randomization of the placebo flag)
  data_combined <- data_raw %>%
    left_join(data_placebo, by = "id")
  
  # Placebo regression for 'Invention'.
  # NOTE(review): the regressor is the raw placebo dummy, not the
  # `fake` = placebohsrfirm * post interaction computed above (which is
  # never used) - verify this matches the paper's placebo design.
  model_invention <- feols(Invention ~ random_placebohsrfirm + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE | code + year, 
                           data = data_combined, 
                           cluster = ~code)
  
  # Save the coefficient and standard error for the placebo term
  model_summary_invention <- tidy(model_invention) %>%
    filter(term == "random_placebohsrfirm") %>%
    select(estimate, std.error) %>%
    mutate(iteration = i)
  
  placebo_results_invention[[i]] <- model_summary_invention
  
  # Same placebo regression for 'Non_Invention'
  model_non_invention <- feols(Non_Invention ~ random_placebohsrfirm + Size + Age + ROA + Lev + RD + Pee + Inst + HHI + KZ + CFC + Capex + NWC + SOE | code + year, 
                               data = data_combined, 
                               cluster = ~code)
  
  # Save the coefficient and standard error for the placebo term
  model_summary_non_invention <- tidy(model_non_invention) %>%
    filter(term == "random_placebohsrfirm") %>%
    select(estimate, std.error) %>%
    mutate(iteration = i)
  
  placebo_results_non_invention[[i]] <- model_summary_non_invention
}
## Processing iteration: 1 
## Processing iteration: 2 
## Processing iteration: 3 
## Processing iteration: 4 
## Processing iteration: 5 
## Processing iteration: 6 
## Processing iteration: 7 
## Processing iteration: 8 
## Processing iteration: 9 
## Processing iteration: 10 
## Processing iteration: 11 
## Processing iteration: 12 
## Processing iteration: 13 
## Processing iteration: 14 
## Processing iteration: 15 
## Processing iteration: 16 
## Processing iteration: 17 
## Processing iteration: 18 
## Processing iteration: 19 
## Processing iteration: 20 
## Processing iteration: 21 
## Processing iteration: 22 
## Processing iteration: 23 
## Processing iteration: 24 
## Processing iteration: 25 
## Processing iteration: 26 
## Processing iteration: 27 
## Processing iteration: 28 
## Processing iteration: 29 
## Processing iteration: 30 
## Processing iteration: 31 
## Processing iteration: 32 
## Processing iteration: 33 
## Processing iteration: 34 
## Processing iteration: 35 
## Processing iteration: 36 
## Processing iteration: 37 
## Processing iteration: 38 
## Processing iteration: 39 
## Processing iteration: 40 
## Processing iteration: 41 
## Processing iteration: 42 
## Processing iteration: 43 
## Processing iteration: 44 
## Processing iteration: 45 
## Processing iteration: 46 
## Processing iteration: 47 
## Processing iteration: 48 
## Processing iteration: 49 
## Processing iteration: 50 
## Processing iteration: 51 
## Processing iteration: 52 
## Processing iteration: 53 
## Processing iteration: 54 
## Processing iteration: 55 
## Processing iteration: 56 
## Processing iteration: 57 
## Processing iteration: 58 
## Processing iteration: 59 
## Processing iteration: 60 
## Processing iteration: 61 
## Processing iteration: 62 
## Processing iteration: 63 
## Processing iteration: 64 
## Processing iteration: 65 
## Processing iteration: 66 
## Processing iteration: 67 
## Processing iteration: 68 
## Processing iteration: 69 
## Processing iteration: 70 
## Processing iteration: 71 
## Processing iteration: 72 
## Processing iteration: 73 
## Processing iteration: 74 
## Processing iteration: 75 
## Processing iteration: 76 
## Processing iteration: 77 
## Processing iteration: 78 
## Processing iteration: 79 
## Processing iteration: 80 
## Processing iteration: 81 
## Processing iteration: 82 
## Processing iteration: 83 
## Processing iteration: 84 
## Processing iteration: 85 
## Processing iteration: 86 
## Processing iteration: 87 
## Processing iteration: 88 
## Processing iteration: 89 
## Processing iteration: 90 
## Processing iteration: 91 
## Processing iteration: 92 
## Processing iteration: 93 
## Processing iteration: 94 
## Processing iteration: 95 
## Processing iteration: 96 
## Processing iteration: 97 
## Processing iteration: 98 
## Processing iteration: 99 
## Processing iteration: 100 
## Processing iteration: 101 
## Processing iteration: 102 
## Processing iteration: 103 
## Processing iteration: 104 
## Processing iteration: 105 
## Processing iteration: 106 
## Processing iteration: 107 
## Processing iteration: 108 
## Processing iteration: 109 
## Processing iteration: 110 
## Processing iteration: 111 
## Processing iteration: 112 
## Processing iteration: 113 
## Processing iteration: 114 
## Processing iteration: 115 
## Processing iteration: 116 
## Processing iteration: 117 
## Processing iteration: 118 
## Processing iteration: 119 
## Processing iteration: 120 
## Processing iteration: 121 
## Processing iteration: 122 
## Processing iteration: 123 
## Processing iteration: 124 
## Processing iteration: 125 
## Processing iteration: 126 
## Processing iteration: 127 
## Processing iteration: 128 
## Processing iteration: 129 
## Processing iteration: 130 
## Processing iteration: 131 
## Processing iteration: 132 
## Processing iteration: 133 
## Processing iteration: 134 
## Processing iteration: 135 
## Processing iteration: 136 
## Processing iteration: 137 
## Processing iteration: 138 
## Processing iteration: 139 
## Processing iteration: 140 
## Processing iteration: 141 
## Processing iteration: 142 
## Processing iteration: 143 
## Processing iteration: 144 
## Processing iteration: 145 
## Processing iteration: 146 
## Processing iteration: 147 
## Processing iteration: 148 
## Processing iteration: 149 
## Processing iteration: 150 
## Processing iteration: 151 
## Processing iteration: 152 
## Processing iteration: 153 
## Processing iteration: 154 
## Processing iteration: 155 
## Processing iteration: 156 
## Processing iteration: 157 
## Processing iteration: 158 
## Processing iteration: 159 
## Processing iteration: 160 
## Processing iteration: 161 
## Processing iteration: 162 
## Processing iteration: 163 
## Processing iteration: 164 
## Processing iteration: 165 
## Processing iteration: 166 
## Processing iteration: 167 
## Processing iteration: 168 
## Processing iteration: 169 
## Processing iteration: 170 
## Processing iteration: 171 
## Processing iteration: 172 
## Processing iteration: 173 
## Processing iteration: 174 
## Processing iteration: 175 
## Processing iteration: 176 
## Processing iteration: 177 
## Processing iteration: 178 
## Processing iteration: 179 
## Processing iteration: 180 
## Processing iteration: 181 
## Processing iteration: 182 
## Processing iteration: 183 
## Processing iteration: 184 
## Processing iteration: 185 
## Processing iteration: 186 
## Processing iteration: 187 
## Processing iteration: 188 
## Processing iteration: 189 
## Processing iteration: 190 
## Processing iteration: 191 
## Processing iteration: 192 
## Processing iteration: 193 
## Processing iteration: 194 
## Processing iteration: 195 
## Processing iteration: 196 
## Processing iteration: 197 
## Processing iteration: 198 
## Processing iteration: 199 
## Processing iteration: 200 
## Processing iteration: 201 
## Processing iteration: 202 
## Processing iteration: 203 
## Processing iteration: 204 
## Processing iteration: 205 
## Processing iteration: 206 
## Processing iteration: 207 
## Processing iteration: 208 
## Processing iteration: 209 
## Processing iteration: 210 
## Processing iteration: 211 
## Processing iteration: 212 
## Processing iteration: 213 
## Processing iteration: 214 
## Processing iteration: 215 
## Processing iteration: 216 
## Processing iteration: 217 
## Processing iteration: 218 
## Processing iteration: 219 
## Processing iteration: 220 
## Processing iteration: 221 
## Processing iteration: 222 
## Processing iteration: 223 
## Processing iteration: 224 
## Processing iteration: 225 
## Processing iteration: 226 
## Processing iteration: 227 
## Processing iteration: 228 
## Processing iteration: 229 
## Processing iteration: 230 
## Processing iteration: 231 
## Processing iteration: 232 
## Processing iteration: 233 
## Processing iteration: 234 
## Processing iteration: 235 
## Processing iteration: 236 
## Processing iteration: 237 
## Processing iteration: 238 
## Processing iteration: 239 
## Processing iteration: 240 
## Processing iteration: 241 
## Processing iteration: 242 
## Processing iteration: 243 
## Processing iteration: 244 
## Processing iteration: 245 
## Processing iteration: 246 
## Processing iteration: 247 
## Processing iteration: 248 
## Processing iteration: 249 
## Processing iteration: 250 
## Processing iteration: 251 
## Processing iteration: 252 
## Processing iteration: 253 
## Processing iteration: 254 
## Processing iteration: 255 
## Processing iteration: 256 
## Processing iteration: 257 
## Processing iteration: 258 
## Processing iteration: 259 
## Processing iteration: 260 
## Processing iteration: 261 
## Processing iteration: 262 
## Processing iteration: 263 
## Processing iteration: 264 
## Processing iteration: 265 
## Processing iteration: 266 
## Processing iteration: 267 
## Processing iteration: 268 
## Processing iteration: 269 
## Processing iteration: 270 
## Processing iteration: 271 
## Processing iteration: 272 
## Processing iteration: 273 
## Processing iteration: 274 
## Processing iteration: 275 
## Processing iteration: 276 
## Processing iteration: 277 
## Processing iteration: 278 
## Processing iteration: 279 
## Processing iteration: 280 
## Processing iteration: 281 
## Processing iteration: 282 
## Processing iteration: 283 
## Processing iteration: 284 
## Processing iteration: 285 
## Processing iteration: 286 
## Processing iteration: 287 
## Processing iteration: 288 
## Processing iteration: 289 
## Processing iteration: 290 
## Processing iteration: 291 
## Processing iteration: 292 
## Processing iteration: 293 
## Processing iteration: 294 
## Processing iteration: 295 
## Processing iteration: 296 
## Processing iteration: 297 
## Processing iteration: 298 
## Processing iteration: 299 
## Processing iteration: 300 
## Processing iteration: 301 
## Processing iteration: 302 
## Processing iteration: 303 
## Processing iteration: 304 
## Processing iteration: 305 
## Processing iteration: 306 
## Processing iteration: 307 
## Processing iteration: 308 
## Processing iteration: 309 
## Processing iteration: 310 
## Processing iteration: 311 
## Processing iteration: 312 
## Processing iteration: 313 
## Processing iteration: 314 
## Processing iteration: 315 
## Processing iteration: 316 
## Processing iteration: 317 
## Processing iteration: 318 
## Processing iteration: 319 
## Processing iteration: 320 
## Processing iteration: 321 
## Processing iteration: 322 
## Processing iteration: 323 
## Processing iteration: 324 
## Processing iteration: 325 
## Processing iteration: 326 
## Processing iteration: 327 
## Processing iteration: 328 
## Processing iteration: 329 
## Processing iteration: 330 
## Processing iteration: 331 
## Processing iteration: 332 
## Processing iteration: 333 
## Processing iteration: 334 
## Processing iteration: 335 
## Processing iteration: 336 
## Processing iteration: 337 
## Processing iteration: 338 
## Processing iteration: 339 
## Processing iteration: 340 
## Processing iteration: 341 
## Processing iteration: 342 
## Processing iteration: 343 
## Processing iteration: 344 
## Processing iteration: 345 
## Processing iteration: 346 
## Processing iteration: 347 
## Processing iteration: 348 
## Processing iteration: 349 
## Processing iteration: 350 
## Processing iteration: 351 
## Processing iteration: 352 
## Processing iteration: 353 
## Processing iteration: 354 
## Processing iteration: 355 
## Processing iteration: 356 
## Processing iteration: 357 
## Processing iteration: 358 
## Processing iteration: 359 
## Processing iteration: 360 
## Processing iteration: 361 
## Processing iteration: 362 
## Processing iteration: 363 
## Processing iteration: 364 
## Processing iteration: 365 
## Processing iteration: 366 
## Processing iteration: 367 
## Processing iteration: 368 
## Processing iteration: 369 
## Processing iteration: 370 
## Processing iteration: 371 
## Processing iteration: 372 
## Processing iteration: 373 
## Processing iteration: 374 
## Processing iteration: 375 
## Processing iteration: 376 
## Processing iteration: 377 
## Processing iteration: 378 
## Processing iteration: 379 
## Processing iteration: 380 
## Processing iteration: 381 
## Processing iteration: 382 
## Processing iteration: 383 
## Processing iteration: 384 
## Processing iteration: 385 
## Processing iteration: 386 
## Processing iteration: 387 
## Processing iteration: 388 
## Processing iteration: 389 
## Processing iteration: 390 
## Processing iteration: 391 
## Processing iteration: 392 
## Processing iteration: 393 
## Processing iteration: 394 
## Processing iteration: 395 
## Processing iteration: 396 
## Processing iteration: 397 
## Processing iteration: 398 
## Processing iteration: 399 
## Processing iteration: 400 
## Processing iteration: 401 
## Processing iteration: 402 
## Processing iteration: 403 
## Processing iteration: 404 
## Processing iteration: 405 
## Processing iteration: 406 
## Processing iteration: 407 
## Processing iteration: 408 
## Processing iteration: 409 
## Processing iteration: 410 
## Processing iteration: 411 
## Processing iteration: 412 
## Processing iteration: 413 
## Processing iteration: 414 
## Processing iteration: 415 
## Processing iteration: 416 
## Processing iteration: 417 
## Processing iteration: 418 
## Processing iteration: 419 
## Processing iteration: 420 
## Processing iteration: 421 
## Processing iteration: 422 
## Processing iteration: 423 
## Processing iteration: 424 
## Processing iteration: 425 
## Processing iteration: 426 
## Processing iteration: 427 
## Processing iteration: 428 
## Processing iteration: 429 
## Processing iteration: 430 
## Processing iteration: 431 
## Processing iteration: 432 
## Processing iteration: 433 
## Processing iteration: 434 
## Processing iteration: 435 
## Processing iteration: 436 
## Processing iteration: 437 
## Processing iteration: 438 
## Processing iteration: 439 
## Processing iteration: 440 
## Processing iteration: 441 
## Processing iteration: 442 
## Processing iteration: 443 
## Processing iteration: 444 
## Processing iteration: 445 
## Processing iteration: 446 
## Processing iteration: 447 
## Processing iteration: 448 
## Processing iteration: 449 
## Processing iteration: 450 
## Processing iteration: 451 
## Processing iteration: 452 
## Processing iteration: 453 
## Processing iteration: 454 
## Processing iteration: 455 
## Processing iteration: 456 
## Processing iteration: 457 
## Processing iteration: 458 
## Processing iteration: 459 
## Processing iteration: 460 
## Processing iteration: 461 
## Processing iteration: 462 
## Processing iteration: 463 
## Processing iteration: 464 
## Processing iteration: 465 
## Processing iteration: 466 
## Processing iteration: 467 
## Processing iteration: 468 
## Processing iteration: 469 
## Processing iteration: 470 
## Processing iteration: 471 
## Processing iteration: 472 
## Processing iteration: 473 
## Processing iteration: 474 
## Processing iteration: 475 
## Processing iteration: 476 
## Processing iteration: 477 
## Processing iteration: 478 
## Processing iteration: 479 
## Processing iteration: 480 
## Processing iteration: 481 
## Processing iteration: 482 
## Processing iteration: 483 
## Processing iteration: 484 
## Processing iteration: 485 
## Processing iteration: 486 
## Processing iteration: 487 
## Processing iteration: 488 
## Processing iteration: 489 
## Processing iteration: 490 
## Processing iteration: 491 
## Processing iteration: 492 
## Processing iteration: 493 
## Processing iteration: 494 
## Processing iteration: 495 
## Processing iteration: 496 
## Processing iteration: 497 
## Processing iteration: 498 
## Processing iteration: 499 
## Processing iteration: 500 
## Processing iteration: 501 
## Processing iteration: 502 
## Processing iteration: 503 
## Processing iteration: 504 
## Processing iteration: 505 
## Processing iteration: 506 
## Processing iteration: 507 
## Processing iteration: 508 
## Processing iteration: 509 
## Processing iteration: 510 
## Processing iteration: 511 
## Processing iteration: 512 
## Processing iteration: 513 
## Processing iteration: 514 
## Processing iteration: 515 
## Processing iteration: 516 
## Processing iteration: 517 
## Processing iteration: 518 
## Processing iteration: 519 
## Processing iteration: 520 
## Processing iteration: 521 
## Processing iteration: 522 
## Processing iteration: 523 
## Processing iteration: 524 
## Processing iteration: 525 
## Processing iteration: 526 
## Processing iteration: 527 
## Processing iteration: 528 
## Processing iteration: 529 
## Processing iteration: 530 
## Processing iteration: 531 
## Processing iteration: 532 
## Processing iteration: 533 
## Processing iteration: 534 
## Processing iteration: 535 
## Processing iteration: 536 
## Processing iteration: 537 
## Processing iteration: 538 
## Processing iteration: 539 
## Processing iteration: 540 
## Processing iteration: 541 
## Processing iteration: 542 
## Processing iteration: 543 
## Processing iteration: 544 
## Processing iteration: 545 
## Processing iteration: 546 
## Processing iteration: 547 
## Processing iteration: 548 
## Processing iteration: 549 
## Processing iteration: 550 
## Processing iteration: 551 
## Processing iteration: 552 
## Processing iteration: 553 
## Processing iteration: 554 
## Processing iteration: 555 
## Processing iteration: 556 
## Processing iteration: 557 
## Processing iteration: 558 
## Processing iteration: 559 
## Processing iteration: 560 
## Processing iteration: 561 
## Processing iteration: 562 
## Processing iteration: 563 
## Processing iteration: 564 
## Processing iteration: 565 
## Processing iteration: 566 
## Processing iteration: 567 
## Processing iteration: 568 
## Processing iteration: 569 
## Processing iteration: 570 
## Processing iteration: 571 
## Processing iteration: 572 
## Processing iteration: 573 
## Processing iteration: 574 
## Processing iteration: 575 
## Processing iteration: 576 
## Processing iteration: 577 
## Processing iteration: 578 
## Processing iteration: 579 
## Processing iteration: 580 
## Processing iteration: 581 
## Processing iteration: 582 
## Processing iteration: 583 
## Processing iteration: 584 
## Processing iteration: 585 
## Processing iteration: 586 
## Processing iteration: 587 
## Processing iteration: 588 
## Processing iteration: 589 
## Processing iteration: 590 
## Processing iteration: 591 
## Processing iteration: 592 
## Processing iteration: 593 
## Processing iteration: 594 
## Processing iteration: 595 
## Processing iteration: 596 
## Processing iteration: 597 
## Processing iteration: 598 
## Processing iteration: 599 
## Processing iteration: 600 
## Processing iteration: 601 
## Processing iteration: 602 
## Processing iteration: 603 
## Processing iteration: 604 
## Processing iteration: 605 
## Processing iteration: 606 
## Processing iteration: 607 
## Processing iteration: 608 
## Processing iteration: 609 
## Processing iteration: 610 
## Processing iteration: 611 
## Processing iteration: 612 
## Processing iteration: 613 
## Processing iteration: 614 
## Processing iteration: 615 
## Processing iteration: 616 
## Processing iteration: 617 
## Processing iteration: 618 
## Processing iteration: 619 
## Processing iteration: 620 
## Processing iteration: 621 
## Processing iteration: 622 
## Processing iteration: 623 
## Processing iteration: 624 
## Processing iteration: 625 
## Processing iteration: 626 
## Processing iteration: 627 
## Processing iteration: 628 
## Processing iteration: 629 
## Processing iteration: 630 
## Processing iteration: 631 
## Processing iteration: 632 
## Processing iteration: 633 
## Processing iteration: 634 
## Processing iteration: 635 
## Processing iteration: 636 
## Processing iteration: 637 
## Processing iteration: 638 
## Processing iteration: 639 
## Processing iteration: 640 
## Processing iteration: 641 
## Processing iteration: 642 
## Processing iteration: 643 
## Processing iteration: 644 
## Processing iteration: 645 
## Processing iteration: 646 
## Processing iteration: 647 
## Processing iteration: 648 
## Processing iteration: 649 
## Processing iteration: 650 
## Processing iteration: 651 
## Processing iteration: 652 
## Processing iteration: 653 
## Processing iteration: 654 
## Processing iteration: 655 
## Processing iteration: 656 
## Processing iteration: 657 
## Processing iteration: 658 
## Processing iteration: 659 
## Processing iteration: 660 
## Processing iteration: 661 
## Processing iteration: 662 
## Processing iteration: 663 
## Processing iteration: 664 
## Processing iteration: 665 
## Processing iteration: 666 
## Processing iteration: 667 
## Processing iteration: 668 
## Processing iteration: 669 
## Processing iteration: 670 
## Processing iteration: 671 
## Processing iteration: 672 
## Processing iteration: 673 
## Processing iteration: 674 
## Processing iteration: 675 
## Processing iteration: 676 
## Processing iteration: 677 
## Processing iteration: 678 
## Processing iteration: 679 
## Processing iteration: 680 
## Processing iteration: 681 
## Processing iteration: 682 
## Processing iteration: 683 
## Processing iteration: 684 
## Processing iteration: 685 
## Processing iteration: 686 
## Processing iteration: 687 
## Processing iteration: 688 
## Processing iteration: 689 
## Processing iteration: 690 
## Processing iteration: 691 
## Processing iteration: 692 
## Processing iteration: 693 
## Processing iteration: 694 
## Processing iteration: 695 
## Processing iteration: 696 
## Processing iteration: 697 
## Processing iteration: 698 
## Processing iteration: 699 
## Processing iteration: 700 
## Processing iteration: 701 
## Processing iteration: 702 
## Processing iteration: 703 
## Processing iteration: 704 
## Processing iteration: 705 
## Processing iteration: 706 
## Processing iteration: 707 
## Processing iteration: 708 
## Processing iteration: 709 
## Processing iteration: 710 
## Processing iteration: 711 
## Processing iteration: 712 
## Processing iteration: 713 
## Processing iteration: 714 
## Processing iteration: 715 
## Processing iteration: 716 
## Processing iteration: 717 
## Processing iteration: 718 
## Processing iteration: 719 
## Processing iteration: 720 
## Processing iteration: 721 
## Processing iteration: 722 
## Processing iteration: 723 
## Processing iteration: 724 
## Processing iteration: 725 
## Processing iteration: 726 
## Processing iteration: 727 
## Processing iteration: 728 
## Processing iteration: 729 
## Processing iteration: 730 
## Processing iteration: 731 
## Processing iteration: 732 
## Processing iteration: 733 
## Processing iteration: 734 
## Processing iteration: 735 
## Processing iteration: 736 
## Processing iteration: 737 
## Processing iteration: 738 
## Processing iteration: 739 
## Processing iteration: 740 
## Processing iteration: 741 
## Processing iteration: 742 
## Processing iteration: 743 
## Processing iteration: 744 
## Processing iteration: 745 
## Processing iteration: 746 
## Processing iteration: 747 
## Processing iteration: 748 
## Processing iteration: 749 
## Processing iteration: 750 
## Processing iteration: 751 
## Processing iteration: 752 
## Processing iteration: 753 
## Processing iteration: 754 
## Processing iteration: 755 
## Processing iteration: 756 
## Processing iteration: 757 
## Processing iteration: 758 
## Processing iteration: 759 
## Processing iteration: 760 
## Processing iteration: 761 
## Processing iteration: 762 
## Processing iteration: 763 
## Processing iteration: 764 
## Processing iteration: 765 
## Processing iteration: 766 
## Processing iteration: 767 
## Processing iteration: 768 
## Processing iteration: 769 
## Processing iteration: 770 
## Processing iteration: 771 
## Processing iteration: 772 
## Processing iteration: 773 
## Processing iteration: 774 
## Processing iteration: 775 
## Processing iteration: 776 
## Processing iteration: 777 
## Processing iteration: 778 
## Processing iteration: 779 
## Processing iteration: 780 
## Processing iteration: 781 
## Processing iteration: 782 
## Processing iteration: 783 
## Processing iteration: 784 
## Processing iteration: 785 
## Processing iteration: 786 
## Processing iteration: 787 
## Processing iteration: 788 
## Processing iteration: 789 
## Processing iteration: 790 
## Processing iteration: 791 
## Processing iteration: 792 
## Processing iteration: 793 
## Processing iteration: 794 
## Processing iteration: 795 
## Processing iteration: 796 
## Processing iteration: 797 
## Processing iteration: 798 
## Processing iteration: 799 
## Processing iteration: 800 
## Processing iteration: 801 
## Processing iteration: 802 
## Processing iteration: 803 
## Processing iteration: 804 
## Processing iteration: 805 
## Processing iteration: 806 
## Processing iteration: 807 
## Processing iteration: 808 
## Processing iteration: 809 
## Processing iteration: 810 
## Processing iteration: 811 
## Processing iteration: 812 
## Processing iteration: 813 
## Processing iteration: 814 
## Processing iteration: 815 
## Processing iteration: 816 
## Processing iteration: 817 
## Processing iteration: 818 
## Processing iteration: 819 
## Processing iteration: 820 
## Processing iteration: 821 
## Processing iteration: 822 
## Processing iteration: 823 
## Processing iteration: 824 
## Processing iteration: 825 
## Processing iteration: 826 
## Processing iteration: 827 
## Processing iteration: 828 
## Processing iteration: 829 
## Processing iteration: 830 
## Processing iteration: 831 
## Processing iteration: 832 
## Processing iteration: 833 
## Processing iteration: 834 
## Processing iteration: 835 
## Processing iteration: 836 
## Processing iteration: 837 
## Processing iteration: 838 
## Processing iteration: 839 
## Processing iteration: 840 
## Processing iteration: 841 
## Processing iteration: 842 
## Processing iteration: 843 
## Processing iteration: 844 
## Processing iteration: 845 
## Processing iteration: 846 
## Processing iteration: 847 
## Processing iteration: 848 
## Processing iteration: 849 
## Processing iteration: 850 
## Processing iteration: 851 
## Processing iteration: 852 
## Processing iteration: 853 
## Processing iteration: 854 
## Processing iteration: 855 
## Processing iteration: 856 
## Processing iteration: 857 
## Processing iteration: 858 
## Processing iteration: 859 
## Processing iteration: 860 
## Processing iteration: 861 
## Processing iteration: 862 
## Processing iteration: 863 
## Processing iteration: 864 
## Processing iteration: 865 
## Processing iteration: 866 
## Processing iteration: 867 
## Processing iteration: 868 
## Processing iteration: 869 
## Processing iteration: 870 
## Processing iteration: 871 
## Processing iteration: 872 
## Processing iteration: 873 
## Processing iteration: 874 
## Processing iteration: 875 
## Processing iteration: 876 
## Processing iteration: 877 
## Processing iteration: 878 
## Processing iteration: 879 
## Processing iteration: 880 
## Processing iteration: 881 
## Processing iteration: 882 
## Processing iteration: 883 
## Processing iteration: 884 
## Processing iteration: 885 
## Processing iteration: 886 
## Processing iteration: 887 
## Processing iteration: 888 
## Processing iteration: 889 
## Processing iteration: 890 
## Processing iteration: 891 
## Processing iteration: 892 
## Processing iteration: 893 
## Processing iteration: 894 
## Processing iteration: 895 
## Processing iteration: 896 
## Processing iteration: 897 
## Processing iteration: 898 
## Processing iteration: 899 
## Processing iteration: 900 
## Processing iteration: 901 
## Processing iteration: 902 
## Processing iteration: 903 
## Processing iteration: 904 
## Processing iteration: 905 
## Processing iteration: 906 
## Processing iteration: 907 
## Processing iteration: 908 
## Processing iteration: 909 
## Processing iteration: 910 
## Processing iteration: 911 
## Processing iteration: 912 
## Processing iteration: 913 
## Processing iteration: 914 
## Processing iteration: 915 
## Processing iteration: 916 
## Processing iteration: 917 
## Processing iteration: 918 
## Processing iteration: 919 
## Processing iteration: 920 
## Processing iteration: 921 
## Processing iteration: 922 
## Processing iteration: 923 
## Processing iteration: 924 
## Processing iteration: 925 
## Processing iteration: 926 
## Processing iteration: 927 
## Processing iteration: 928 
## Processing iteration: 929 
## Processing iteration: 930 
## Processing iteration: 931 
## Processing iteration: 932 
## Processing iteration: 933 
## Processing iteration: 934 
## Processing iteration: 935 
## Processing iteration: 936 
## Processing iteration: 937 
## Processing iteration: 938 
## Processing iteration: 939 
## Processing iteration: 940 
## Processing iteration: 941 
## Processing iteration: 942 
## Processing iteration: 943 
## Processing iteration: 944 
## Processing iteration: 945 
## Processing iteration: 946 
## Processing iteration: 947 
## Processing iteration: 948 
## Processing iteration: 949 
## Processing iteration: 950 
## Processing iteration: 951 
## Processing iteration: 952 
## Processing iteration: 953 
## Processing iteration: 954 
## Processing iteration: 955 
## Processing iteration: 956 
## Processing iteration: 957 
## Processing iteration: 958 
## Processing iteration: 959 
## Processing iteration: 960 
## Processing iteration: 961 
## Processing iteration: 962 
## Processing iteration: 963 
## Processing iteration: 964 
## Processing iteration: 965 
## Processing iteration: 966 
## Processing iteration: 967 
## Processing iteration: 968 
## Processing iteration: 969 
## Processing iteration: 970 
## Processing iteration: 971 
## Processing iteration: 972 
## Processing iteration: 973 
## Processing iteration: 974 
## Processing iteration: 975 
## Processing iteration: 976 
## Processing iteration: 977 
## Processing iteration: 978 
## Processing iteration: 979 
## Processing iteration: 980 
## Processing iteration: 981 
## Processing iteration: 982 
## Processing iteration: 983 
## Processing iteration: 984 
## Processing iteration: 985 
## Processing iteration: 986 
## Processing iteration: 987 
## Processing iteration: 988 
## Processing iteration: 989 
## Processing iteration: 990 
## Processing iteration: 991 
## Processing iteration: 992 
## Processing iteration: 993 
## Processing iteration: 994 
## Processing iteration: 995 
## Processing iteration: 996 
## Processing iteration: 997 
## Processing iteration: 998 
## Processing iteration: 999 
## Processing iteration: 1000
# Combine the per-iteration placebo regression results into single data frames
placebo_invention_df <- bind_rows(placebo_results_invention)
placebo_non_invention_df <- bind_rows(placebo_results_non_invention)

# Add Wald t-statistics and two-sided p-values to a placebo-results data frame.
#
# Args:
#   results_df: data frame with `estimate` and `std.error` columns (one row
#               per placebo iteration).
#   dof: residual degrees of freedom used for the t reference distribution.
#        Default 8137 — assumed to be the residual df of the baseline DiD
#        regression; TODO confirm against the model fit earlier in the file.
#
# Returns: `results_df` with added columns `t` and `p`.
add_test_stats <- function(results_df, dof = 8137) {
  results_df %>%
    mutate(
      t = estimate / std.error,                         # Wald t-statistic
      p = 2 * pt(abs(t), df = dof, lower.tail = FALSE)  # two-sided p-value
    )
}

# Calculate t-statistic and p-value for the invention and non-invention models
# (previously duplicated inline; now defined once via add_test_stats)
placebo_invention_df <- add_test_stats(placebo_invention_df)
placebo_non_invention_df <- add_test_stats(placebo_non_invention_df)

library(plotly)

# Scatter plot of placebo coefficient estimates vs their p-values.
#
# Args:
#   df: data frame with `estimate` and `p` columns.
#   model_label: label inserted into the plot title, e.g. "Invention".
# Returns: a plotly object.
make_scatter_plot <- function(df, model_label) {
  plot_ly(df, x = ~estimate, y = ~p, type = 'scatter', mode = 'markers') %>%
    layout(
      title = paste0("Scatter Plot of Estimations vs P-Values (", model_label, ")"),
      xaxis = list(title = "Estimation"),
      yaxis = list(title = "P-Value")
    )
}

# Density histogram of the placebo coefficient estimates
# (the distribution of 'placebohsrfirm' coefficients across iterations).
#
# Args:
#   df: data frame with an `estimate` column.
#   model_label: label inserted into the plot title.
# Returns: a plotly object.
make_density_plot <- function(df, model_label) {
  plot_ly(df, x = ~estimate, type = 'histogram', histnorm = 'density', nbinsx = 50) %>%
    layout(
      title = paste0("Density Plot of Estimations (", model_label, ")"),
      xaxis = list(title = "Estimation"),
      yaxis = list(title = "Probability Density")
    )
}

# Build the four panels via the helpers (previously four near-identical
# inline plot_ly() constructions differing only in data frame and label)
plot_invention_scatter <- make_scatter_plot(placebo_invention_df, "Invention")
plot_invention_density <- make_density_plot(placebo_invention_df, "Invention")
plot_non_invention_scatter <- make_scatter_plot(placebo_non_invention_df, "Non-Invention")
plot_non_invention_density <- make_density_plot(placebo_non_invention_df, "Non-Invention")

# Combine all four panels into one interactive figure
combined_plot <- subplot(
  plot_invention_scatter,
  plot_invention_density,
  plot_non_invention_scatter,
  plot_non_invention_density,
  nrows = 2,  # Arrange the plots in 2 rows
  titleX = TRUE, titleY = TRUE,  # Show axis titles for all plots
  margin = 0.05  # Add some margin between plots
) %>%
  layout(title = "Combined Plots: Invention and Non-Invention Models",
         showlegend = FALSE)

# Display the combined plot
combined_plot

References

Chen, Kejing, Qiaoshuang Meng, Yutao Sun, and Qingqing Wan. 2024. “How Does Industrial Policy Experimentation Influence Innovation Performance? A Case of Made in China 2025.” Humanities and Social Sciences Communications 11 (1): 1–17.